author     Heiko Carstens <heiko.carstens@de.ibm.com>    2014-03-21 05:42:25 -0400
committer  Martin Schwidefsky <schwidefsky@de.ibm.com>   2014-04-03 08:31:04 -0400
commit     457f2180951cdcbfb4657ddcc83b486e93497f56 (patch)
tree       4a4b085f2dc4c98810decac658fd0b629acd385e /arch/s390/lib
parent     1b948d6caec4f28e3524244ca0f77c6ae8ddceef (diff)
s390/uaccess: rework uaccess code - fix locking issues
The current uaccess code uses a page table walk in some circumstances,
e.g. for in-atomic futex operations or when running on old hardware
which doesn't support the mvcos instruction.

However it turned out that the page table walk code does not correctly
lock page tables when accessing page table entries. In other words: a
different cpu may invalidate a page table entry while the current cpu
inspects the pte. This may lead to random data corruption.

Adding correct locking however isn't trivial for all uaccess operations.
Especially copy_in_user() is problematic, since it requires holding at
least two locks, but must be protected against ABBA deadlock when a
different cpu also performs a copy_in_user() operation.

So the solution is a different approach where we change address spaces:

User space runs in primary address mode, or access register mode within
vdso code, like it currently already does.

The kernel usually also runs in home space mode, however when accessing
user space the kernel switches to primary or secondary address mode if
the mvcos instruction is not available or if a compare-and-swap (futex)
instruction on a user space address is performed.
KVM however is special, since it requires the kernel to run in home
address space while implicitly accessing user space with the sie
instruction.

So we end up with:

User space:
- runs in primary or access register mode
- cr1 contains the user asce
- cr7 contains the user asce
- cr13 contains the kernel asce

Kernel space:
- runs in home space mode
- cr1 contains the user or kernel asce
  -> the kernel asce is loaded when a uaccess requires primary or
     secondary address mode
- cr7 contains the user or kernel asce (changed with set_fs())
- cr13 contains the kernel asce

In case of uaccess the kernel changes to:
- primary space mode in case of a uaccess (copy_to_user) and uses
  e.g. the mvcp instruction to access user space. However the kernel
  will stay in home space mode if the mvcos instruction is available
- secondary space mode in case of futex atomic operations, so that the
  instructions come from primary address space and data from secondary
  space

In case of kvm the kernel runs in home space mode, but cr1 gets switched
to contain the gmap asce before the sie instruction gets executed. When
the sie instruction is finished cr1 will be switched back to contain the
user asce.

A context switch between two processes will always load the kernel asce
for the next process in cr1. So the first exit to user space is a bit
more expensive (one extra load control register instruction) than
before, however it keeps the code rather simple.

In sum this means there is no need to perform any error prone page table
walks anymore when accessing user space.

The patch seems to be rather large, however it mainly removes the page
table walk code and restores the previously deleted "standard" uaccess
code, with a couple of changes.

The uaccess without mvcos mode can be enforced with the
"uaccess_primary" kernel parameter.

Reported-by: Christian Borntraeger <borntraeger@de.ibm.com>
Signed-off-by: Heiko Carstens <heiko.carstens@de.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
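[Editor's note] For orientation before reading the diff: the new
arch/s390/lib/uaccess.c picks between the mvcos fast path and the
mvcp/mvcs fallback at runtime via a static key that is set once at boot,
based on test_facility(27) and the "uaccess_primary" parameter. The
following stand-alone C sketch only models that dispatch pattern; it
uses a plain boolean and memcpy stand-ins instead of the jump label and
inline assembly, and every name except have_mvcos and the
copy_from_user_* variants is purely illustrative.

    /*
     * Minimal user-space sketch (not kernel code) of the dispatch pattern
     * the patch introduces: one exported entry point that takes the MVCOS
     * path when the facility is present, and the MVCP/MVCS path otherwise.
     * The real code uses a jump label (static key), so the selected branch
     * costs only a patched nop at runtime.
     */
    #include <stdbool.h>
    #include <stdio.h>
    #include <string.h>

    static bool have_mvcos;  /* stand-in for the 'have_mvcos' static key */

    /* Stand-ins for the asm variants; both return the number of bytes NOT copied. */
    static size_t copy_from_user_mvcos(void *to, const void *from, size_t n)
    {
    	memcpy(to, from, n);     /* real code: .insn ss (MVCOS) */
    	return 0;
    }

    static size_t copy_from_user_mvcp(void *to, const void *from, size_t n)
    {
    	memcpy(to, from, n);     /* real code: sacf 0 + MVCP loop */
    	return 0;
    }

    size_t my_copy_from_user(void *to, const void *from, size_t n)
    {
    	if (have_mvcos)          /* real code: static_key_false(&have_mvcos) */
    		return copy_from_user_mvcos(to, from, n);
    	return copy_from_user_mvcp(to, from, n);
    }

    int main(void)
    {
    	char src[] = "example", dst[8];

    	have_mvcos = true;       /* real code: set at boot via test_facility(27) */
    	printf("not copied: %zu, dst: %s\n",
    	       my_copy_from_user(dst, src, sizeof(src)), dst);
    	return 0;
    }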
Diffstat (limited to 'arch/s390/lib')
-rw-r--r--  arch/s390/lib/Makefile           2
-rw-r--r--  arch/s390/lib/uaccess.c        407
-rw-r--r--  arch/s390/lib/uaccess.h         16
-rw-r--r--  arch/s390/lib/uaccess_mvcos.c  263
-rw-r--r--  arch/s390/lib/uaccess_pt.c     471
5 files changed, 408 insertions, 751 deletions
diff --git a/arch/s390/lib/Makefile b/arch/s390/lib/Makefile
index e3fffe1dff51..c6d752e8bf28 100644
--- a/arch/s390/lib/Makefile
+++ b/arch/s390/lib/Makefile
@@ -2,7 +2,7 @@
 # Makefile for s390-specific library files..
 #
 
-lib-y += delay.o string.o uaccess_pt.o uaccess_mvcos.o find.o
+lib-y += delay.o string.o uaccess.o find.o
 obj-$(CONFIG_32BIT) += div64.o qrnnd.o ucmpdi2.o mem32.o
 obj-$(CONFIG_64BIT) += mem64.o
 lib-$(CONFIG_SMP) += spinlock.o
diff --git a/arch/s390/lib/uaccess.c b/arch/s390/lib/uaccess.c
new file mode 100644
index 000000000000..23f866b4c7f1
--- /dev/null
+++ b/arch/s390/lib/uaccess.c
@@ -0,0 +1,407 @@
1/*
2 * Standard user space access functions based on mvcp/mvcs and doing
3 * interesting things in the secondary space mode.
4 *
5 * Copyright IBM Corp. 2006,2014
6 * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
7 * Gerald Schaefer (gerald.schaefer@de.ibm.com)
8 */
9
10#include <linux/jump_label.h>
11#include <linux/uaccess.h>
12#include <linux/export.h>
13#include <linux/errno.h>
14#include <linux/mm.h>
15#include <asm/mmu_context.h>
16#include <asm/facility.h>
17
18#ifndef CONFIG_64BIT
19#define AHI "ahi"
20#define ALR "alr"
21#define CLR "clr"
22#define LHI "lhi"
23#define SLR "slr"
24#else
25#define AHI "aghi"
26#define ALR "algr"
27#define CLR "clgr"
28#define LHI "lghi"
29#define SLR "slgr"
30#endif
31
32static struct static_key have_mvcos = STATIC_KEY_INIT_FALSE;
33
34static inline unsigned long copy_from_user_mvcos(void *x, const void __user *ptr,
35 unsigned long size)
36{
37 register unsigned long reg0 asm("0") = 0x81UL;
38 unsigned long tmp1, tmp2;
39
40 tmp1 = -4096UL;
41 asm volatile(
42 "0: .insn ss,0xc80000000000,0(%0,%2),0(%1),0\n"
43 "9: jz 7f\n"
44 "1:"ALR" %0,%3\n"
45 " "SLR" %1,%3\n"
46 " "SLR" %2,%3\n"
47 " j 0b\n"
48 "2: la %4,4095(%1)\n"/* %4 = ptr + 4095 */
49 " nr %4,%3\n" /* %4 = (ptr + 4095) & -4096 */
50 " "SLR" %4,%1\n"
51 " "CLR" %0,%4\n" /* copy crosses next page boundary? */
52 " jnh 4f\n"
53 "3: .insn ss,0xc80000000000,0(%4,%2),0(%1),0\n"
54 "10:"SLR" %0,%4\n"
55 " "ALR" %2,%4\n"
56 "4:"LHI" %4,-1\n"
57 " "ALR" %4,%0\n" /* copy remaining size, subtract 1 */
58 " bras %3,6f\n" /* memset loop */
59 " xc 0(1,%2),0(%2)\n"
60 "5: xc 0(256,%2),0(%2)\n"
61 " la %2,256(%2)\n"
62 "6:"AHI" %4,-256\n"
63 " jnm 5b\n"
64 " ex %4,0(%3)\n"
65 " j 8f\n"
66 "7:"SLR" %0,%0\n"
67 "8:\n"
68 EX_TABLE(0b,2b) EX_TABLE(3b,4b) EX_TABLE(9b,2b) EX_TABLE(10b,4b)
69 : "+a" (size), "+a" (ptr), "+a" (x), "+a" (tmp1), "=a" (tmp2)
70 : "d" (reg0) : "cc", "memory");
71 return size;
72}
73
74static inline unsigned long copy_from_user_mvcp(void *x, const void __user *ptr,
75 unsigned long size)
76{
77 unsigned long tmp1, tmp2;
78
79 update_primary_asce(current);
80 tmp1 = -256UL;
81 asm volatile(
82 " sacf 0\n"
83 "0: mvcp 0(%0,%2),0(%1),%3\n"
84 "10:jz 8f\n"
85 "1:"ALR" %0,%3\n"
86 " la %1,256(%1)\n"
87 " la %2,256(%2)\n"
88 "2: mvcp 0(%0,%2),0(%1),%3\n"
89 "11:jnz 1b\n"
90 " j 8f\n"
91 "3: la %4,255(%1)\n" /* %4 = ptr + 255 */
92 " "LHI" %3,-4096\n"
93 " nr %4,%3\n" /* %4 = (ptr + 255) & -4096 */
94 " "SLR" %4,%1\n"
95 " "CLR" %0,%4\n" /* copy crosses next page boundary? */
96 " jnh 5f\n"
97 "4: mvcp 0(%4,%2),0(%1),%3\n"
98 "12:"SLR" %0,%4\n"
99 " "ALR" %2,%4\n"
100 "5:"LHI" %4,-1\n"
101 " "ALR" %4,%0\n" /* copy remaining size, subtract 1 */
102 " bras %3,7f\n" /* memset loop */
103 " xc 0(1,%2),0(%2)\n"
104 "6: xc 0(256,%2),0(%2)\n"
105 " la %2,256(%2)\n"
106 "7:"AHI" %4,-256\n"
107 " jnm 6b\n"
108 " ex %4,0(%3)\n"
109 " j 9f\n"
110 "8:"SLR" %0,%0\n"
111 "9: sacf 768\n"
112 EX_TABLE(0b,3b) EX_TABLE(2b,3b) EX_TABLE(4b,5b)
113 EX_TABLE(10b,3b) EX_TABLE(11b,3b) EX_TABLE(12b,5b)
114 : "+a" (size), "+a" (ptr), "+a" (x), "+a" (tmp1), "=a" (tmp2)
115 : : "cc", "memory");
116 return size;
117}
118
119unsigned long __copy_from_user(void *to, const void __user *from, unsigned long n)
120{
121 if (static_key_false(&have_mvcos))
122 return copy_from_user_mvcos(to, from, n);
123 return copy_from_user_mvcp(to, from, n);
124}
125EXPORT_SYMBOL(__copy_from_user);
126
127static inline unsigned long copy_to_user_mvcos(void __user *ptr, const void *x,
128 unsigned long size)
129{
130 register unsigned long reg0 asm("0") = 0x810000UL;
131 unsigned long tmp1, tmp2;
132
133 tmp1 = -4096UL;
134 asm volatile(
135 "0: .insn ss,0xc80000000000,0(%0,%1),0(%2),0\n"
136 "6: jz 4f\n"
137 "1:"ALR" %0,%3\n"
138 " "SLR" %1,%3\n"
139 " "SLR" %2,%3\n"
140 " j 0b\n"
141 "2: la %4,4095(%1)\n"/* %4 = ptr + 4095 */
142 " nr %4,%3\n" /* %4 = (ptr + 4095) & -4096 */
143 " "SLR" %4,%1\n"
144 " "CLR" %0,%4\n" /* copy crosses next page boundary? */
145 " jnh 5f\n"
146 "3: .insn ss,0xc80000000000,0(%4,%1),0(%2),0\n"
147 "7:"SLR" %0,%4\n"
148 " j 5f\n"
149 "4:"SLR" %0,%0\n"
150 "5:\n"
151 EX_TABLE(0b,2b) EX_TABLE(3b,5b) EX_TABLE(6b,2b) EX_TABLE(7b,5b)
152 : "+a" (size), "+a" (ptr), "+a" (x), "+a" (tmp1), "=a" (tmp2)
153 : "d" (reg0) : "cc", "memory");
154 return size;
155}
156
157static inline unsigned long copy_to_user_mvcs(void __user *ptr, const void *x,
158 unsigned long size)
159{
160 unsigned long tmp1, tmp2;
161
162 update_primary_asce(current);
163 tmp1 = -256UL;
164 asm volatile(
165 " sacf 0\n"
166 "0: mvcs 0(%0,%1),0(%2),%3\n"
167 "7: jz 5f\n"
168 "1:"ALR" %0,%3\n"
169 " la %1,256(%1)\n"
170 " la %2,256(%2)\n"
171 "2: mvcs 0(%0,%1),0(%2),%3\n"
172 "8: jnz 1b\n"
173 " j 5f\n"
174 "3: la %4,255(%1)\n" /* %4 = ptr + 255 */
175 " "LHI" %3,-4096\n"
176 " nr %4,%3\n" /* %4 = (ptr + 255) & -4096 */
177 " "SLR" %4,%1\n"
178 " "CLR" %0,%4\n" /* copy crosses next page boundary? */
179 " jnh 6f\n"
180 "4: mvcs 0(%4,%1),0(%2),%3\n"
181 "9:"SLR" %0,%4\n"
182 " j 6f\n"
183 "5:"SLR" %0,%0\n"
184 "6: sacf 768\n"
185 EX_TABLE(0b,3b) EX_TABLE(2b,3b) EX_TABLE(4b,6b)
186 EX_TABLE(7b,3b) EX_TABLE(8b,3b) EX_TABLE(9b,6b)
187 : "+a" (size), "+a" (ptr), "+a" (x), "+a" (tmp1), "=a" (tmp2)
188 : : "cc", "memory");
189 return size;
190}
191
192unsigned long __copy_to_user(void __user *to, const void *from, unsigned long n)
193{
194 if (static_key_false(&have_mvcos))
195 return copy_to_user_mvcos(to, from, n);
196 return copy_to_user_mvcs(to, from, n);
197}
198EXPORT_SYMBOL(__copy_to_user);
199
200static inline unsigned long copy_in_user_mvcos(void __user *to, const void __user *from,
201 unsigned long size)
202{
203 register unsigned long reg0 asm("0") = 0x810081UL;
204 unsigned long tmp1, tmp2;
205
206 tmp1 = -4096UL;
207 /* FIXME: copy with reduced length. */
208 asm volatile(
209 "0: .insn ss,0xc80000000000,0(%0,%1),0(%2),0\n"
210 " jz 2f\n"
211 "1:"ALR" %0,%3\n"
212 " "SLR" %1,%3\n"
213 " "SLR" %2,%3\n"
214 " j 0b\n"
215 "2:"SLR" %0,%0\n"
216 "3: \n"
217 EX_TABLE(0b,3b)
218 : "+a" (size), "+a" (to), "+a" (from), "+a" (tmp1), "=a" (tmp2)
219 : "d" (reg0) : "cc", "memory");
220 return size;
221}
222
223static inline unsigned long copy_in_user_mvc(void __user *to, const void __user *from,
224 unsigned long size)
225{
226 unsigned long tmp1;
227
228 update_primary_asce(current);
229 asm volatile(
230 " sacf 256\n"
231 " "AHI" %0,-1\n"
232 " jo 5f\n"
233 " bras %3,3f\n"
234 "0:"AHI" %0,257\n"
235 "1: mvc 0(1,%1),0(%2)\n"
236 " la %1,1(%1)\n"
237 " la %2,1(%2)\n"
238 " "AHI" %0,-1\n"
239 " jnz 1b\n"
240 " j 5f\n"
241 "2: mvc 0(256,%1),0(%2)\n"
242 " la %1,256(%1)\n"
243 " la %2,256(%2)\n"
244 "3:"AHI" %0,-256\n"
245 " jnm 2b\n"
246 "4: ex %0,1b-0b(%3)\n"
247 "5: "SLR" %0,%0\n"
248 "6: sacf 768\n"
249 EX_TABLE(1b,6b) EX_TABLE(2b,0b) EX_TABLE(4b,0b)
250 : "+a" (size), "+a" (to), "+a" (from), "=a" (tmp1)
251 : : "cc", "memory");
252 return size;
253}
254
255unsigned long __copy_in_user(void __user *to, const void __user *from, unsigned long n)
256{
257 if (static_key_false(&have_mvcos))
258 return copy_in_user_mvcos(to, from, n);
259 return copy_in_user_mvc(to, from, n);
260}
261EXPORT_SYMBOL(__copy_in_user);
262
263static inline unsigned long clear_user_mvcos(void __user *to, unsigned long size)
264{
265 register unsigned long reg0 asm("0") = 0x810000UL;
266 unsigned long tmp1, tmp2;
267
268 tmp1 = -4096UL;
269 asm volatile(
270 "0: .insn ss,0xc80000000000,0(%0,%1),0(%4),0\n"
271 " jz 4f\n"
272 "1:"ALR" %0,%2\n"
273 " "SLR" %1,%2\n"
274 " j 0b\n"
275 "2: la %3,4095(%1)\n"/* %4 = to + 4095 */
276 " nr %3,%2\n" /* %4 = (to + 4095) & -4096 */
277 " "SLR" %3,%1\n"
278 " "CLR" %0,%3\n" /* copy crosses next page boundary? */
279 " jnh 5f\n"
280 "3: .insn ss,0xc80000000000,0(%3,%1),0(%4),0\n"
281 " "SLR" %0,%3\n"
282 " j 5f\n"
283 "4:"SLR" %0,%0\n"
284 "5:\n"
285 EX_TABLE(0b,2b) EX_TABLE(3b,5b)
286 : "+a" (size), "+a" (to), "+a" (tmp1), "=a" (tmp2)
287 : "a" (empty_zero_page), "d" (reg0) : "cc", "memory");
288 return size;
289}
290
291static inline unsigned long clear_user_xc(void __user *to, unsigned long size)
292{
293 unsigned long tmp1, tmp2;
294
295 update_primary_asce(current);
296 asm volatile(
297 " sacf 256\n"
298 " "AHI" %0,-1\n"
299 " jo 5f\n"
300 " bras %3,3f\n"
301 " xc 0(1,%1),0(%1)\n"
302 "0:"AHI" %0,257\n"
303 " la %2,255(%1)\n" /* %2 = ptr + 255 */
304 " srl %2,12\n"
305 " sll %2,12\n" /* %2 = (ptr + 255) & -4096 */
306 " "SLR" %2,%1\n"
307 " "CLR" %0,%2\n" /* clear crosses next page boundary? */
308 " jnh 5f\n"
309 " "AHI" %2,-1\n"
310 "1: ex %2,0(%3)\n"
311 " "AHI" %2,1\n"
312 " "SLR" %0,%2\n"
313 " j 5f\n"
314 "2: xc 0(256,%1),0(%1)\n"
315 " la %1,256(%1)\n"
316 "3:"AHI" %0,-256\n"
317 " jnm 2b\n"
318 "4: ex %0,0(%3)\n"
319 "5: "SLR" %0,%0\n"
320 "6: sacf 768\n"
321 EX_TABLE(1b,6b) EX_TABLE(2b,0b) EX_TABLE(4b,0b)
322 : "+a" (size), "+a" (to), "=a" (tmp1), "=a" (tmp2)
323 : : "cc", "memory");
324 return size;
325}
326
327unsigned long __clear_user(void __user *to, unsigned long size)
328{
329 if (static_key_false(&have_mvcos))
330 return clear_user_mvcos(to, size);
331 return clear_user_xc(to, size);
332}
333EXPORT_SYMBOL(__clear_user);
334
335static inline unsigned long strnlen_user_srst(const char __user *src,
336 unsigned long size)
337{
338 register unsigned long reg0 asm("0") = 0;
339 unsigned long tmp1, tmp2;
340
341 if (unlikely(!size))
342 return 0;
343 update_primary_asce(current);
344 asm volatile(
345 " la %2,0(%1)\n"
346 " la %3,0(%0,%1)\n"
347 " "SLR" %0,%0\n"
348 " sacf 256\n"
349 "0: srst %3,%2\n"
350 " jo 0b\n"
351 " la %0,1(%3)\n" /* strnlen_user results includes \0 */
352 " "SLR" %0,%1\n"
353 "1: sacf 768\n"
354 EX_TABLE(0b,1b)
355 : "+a" (size), "+a" (src), "=a" (tmp1), "=a" (tmp2)
356 : "d" (reg0) : "cc", "memory");
357 return size;
358}
359
360unsigned long __strnlen_user(const char __user *src, unsigned long size)
361{
362 update_primary_asce(current);
363 return strnlen_user_srst(src, size);
364}
365EXPORT_SYMBOL(__strnlen_user);
366
367long __strncpy_from_user(char *dst, const char __user *src, long size)
368{
369 size_t done, len, offset, len_str;
370
371 if (unlikely(size <= 0))
372 return 0;
373 done = 0;
374 do {
375 offset = (size_t)src & ~PAGE_MASK;
376 len = min(size - done, PAGE_SIZE - offset);
377 if (copy_from_user(dst, src, len))
378 return -EFAULT;
379 len_str = strnlen(dst, len);
380 done += len_str;
381 src += len_str;
382 dst += len_str;
383 } while ((len_str == len) && (done < size));
384 return done;
385}
386EXPORT_SYMBOL(__strncpy_from_user);
387
388/*
389 * The "old" uaccess variant without mvcos can be enforced with the
390 * uaccess_primary kernel parameter. This is mainly for debugging purposes.
391 */
392static int uaccess_primary __initdata;
393
394static int __init parse_uaccess_pt(char *__unused)
395{
396 uaccess_primary = 1;
397 return 0;
398}
399early_param("uaccess_primary", parse_uaccess_pt);
400
401static int __init uaccess_init(void)
402{
403 if (IS_ENABLED(CONFIG_64BIT) && !uaccess_primary && test_facility(27))
404 static_key_slow_inc(&have_mvcos);
405 return 0;
406}
407early_initcall(uaccess_init);
diff --git a/arch/s390/lib/uaccess.h b/arch/s390/lib/uaccess.h
deleted file mode 100644
index c7e0e81f4b4e..000000000000
--- a/arch/s390/lib/uaccess.h
+++ /dev/null
@@ -1,16 +0,0 @@
1/*
2 * Copyright IBM Corp. 2007
3 *
4 */
5
6#ifndef __ARCH_S390_LIB_UACCESS_H
7#define __ARCH_S390_LIB_UACCESS_H
8
9unsigned long copy_from_user_pt(void *to, const void __user *from, unsigned long n);
10unsigned long copy_to_user_pt(void __user *to, const void *from, unsigned long n);
11unsigned long copy_in_user_pt(void __user *to, const void __user *from, unsigned long n);
12unsigned long clear_user_pt(void __user *to, unsigned long n);
13unsigned long strnlen_user_pt(const char __user *src, unsigned long count);
14long strncpy_from_user_pt(char *dst, const char __user *src, long count);
15
16#endif /* __ARCH_S390_LIB_UACCESS_H */
diff --git a/arch/s390/lib/uaccess_mvcos.c b/arch/s390/lib/uaccess_mvcos.c
deleted file mode 100644
index ae97b8df11aa..000000000000
--- a/arch/s390/lib/uaccess_mvcos.c
+++ /dev/null
@@ -1,263 +0,0 @@
1/*
2 * Optimized user space space access functions based on mvcos.
3 *
4 * Copyright IBM Corp. 2006
5 * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
6 * Gerald Schaefer (gerald.schaefer@de.ibm.com)
7 */
8
9#include <linux/jump_label.h>
10#include <linux/errno.h>
11#include <linux/init.h>
12#include <linux/mm.h>
13#include <asm/facility.h>
14#include <asm/uaccess.h>
15#include <asm/futex.h>
16#include "uaccess.h"
17
18#ifndef CONFIG_64BIT
19#define AHI "ahi"
20#define ALR "alr"
21#define CLR "clr"
22#define LHI "lhi"
23#define SLR "slr"
24#else
25#define AHI "aghi"
26#define ALR "algr"
27#define CLR "clgr"
28#define LHI "lghi"
29#define SLR "slgr"
30#endif
31
32static struct static_key have_mvcos = STATIC_KEY_INIT_TRUE;
33
34static inline unsigned long copy_from_user_mvcos(void *x, const void __user *ptr,
35 unsigned long size)
36{
37 register unsigned long reg0 asm("0") = 0x81UL;
38 unsigned long tmp1, tmp2;
39
40 tmp1 = -4096UL;
41 asm volatile(
42 "0: .insn ss,0xc80000000000,0(%0,%2),0(%1),0\n"
43 "9: jz 7f\n"
44 "1:"ALR" %0,%3\n"
45 " "SLR" %1,%3\n"
46 " "SLR" %2,%3\n"
47 " j 0b\n"
48 "2: la %4,4095(%1)\n"/* %4 = ptr + 4095 */
49 " nr %4,%3\n" /* %4 = (ptr + 4095) & -4096 */
50 " "SLR" %4,%1\n"
51 " "CLR" %0,%4\n" /* copy crosses next page boundary? */
52 " jnh 4f\n"
53 "3: .insn ss,0xc80000000000,0(%4,%2),0(%1),0\n"
54 "10:"SLR" %0,%4\n"
55 " "ALR" %2,%4\n"
56 "4:"LHI" %4,-1\n"
57 " "ALR" %4,%0\n" /* copy remaining size, subtract 1 */
58 " bras %3,6f\n" /* memset loop */
59 " xc 0(1,%2),0(%2)\n"
60 "5: xc 0(256,%2),0(%2)\n"
61 " la %2,256(%2)\n"
62 "6:"AHI" %4,-256\n"
63 " jnm 5b\n"
64 " ex %4,0(%3)\n"
65 " j 8f\n"
66 "7:"SLR" %0,%0\n"
67 "8: \n"
68 EX_TABLE(0b,2b) EX_TABLE(3b,4b) EX_TABLE(9b,2b) EX_TABLE(10b,4b)
69 : "+a" (size), "+a" (ptr), "+a" (x), "+a" (tmp1), "=a" (tmp2)
70 : "d" (reg0) : "cc", "memory");
71 return size;
72}
73
74unsigned long __copy_from_user(void *to, const void __user *from, unsigned long n)
75{
76 if (static_key_true(&have_mvcos))
77 return copy_from_user_mvcos(to, from, n);
78 return copy_from_user_pt(to, from, n);
79}
80EXPORT_SYMBOL(__copy_from_user);
81
82static inline unsigned long copy_to_user_mvcos(void __user *ptr, const void *x,
83 unsigned long size)
84{
85 register unsigned long reg0 asm("0") = 0x810000UL;
86 unsigned long tmp1, tmp2;
87
88 tmp1 = -4096UL;
89 asm volatile(
90 "0: .insn ss,0xc80000000000,0(%0,%1),0(%2),0\n"
91 "6: jz 4f\n"
92 "1:"ALR" %0,%3\n"
93 " "SLR" %1,%3\n"
94 " "SLR" %2,%3\n"
95 " j 0b\n"
96 "2: la %4,4095(%1)\n"/* %4 = ptr + 4095 */
97 " nr %4,%3\n" /* %4 = (ptr + 4095) & -4096 */
98 " "SLR" %4,%1\n"
99 " "CLR" %0,%4\n" /* copy crosses next page boundary? */
100 " jnh 5f\n"
101 "3: .insn ss,0xc80000000000,0(%4,%1),0(%2),0\n"
102 "7:"SLR" %0,%4\n"
103 " j 5f\n"
104 "4:"SLR" %0,%0\n"
105 "5: \n"
106 EX_TABLE(0b,2b) EX_TABLE(3b,5b) EX_TABLE(6b,2b) EX_TABLE(7b,5b)
107 : "+a" (size), "+a" (ptr), "+a" (x), "+a" (tmp1), "=a" (tmp2)
108 : "d" (reg0) : "cc", "memory");
109 return size;
110}
111
112unsigned long __copy_to_user(void __user *to, const void *from, unsigned long n)
113{
114 if (static_key_true(&have_mvcos))
115 return copy_to_user_mvcos(to, from, n);
116 return copy_to_user_pt(to, from, n);
117}
118EXPORT_SYMBOL(__copy_to_user);
119
120static inline unsigned long copy_in_user_mvcos(void __user *to, const void __user *from,
121 unsigned long size)
122{
123 register unsigned long reg0 asm("0") = 0x810081UL;
124 unsigned long tmp1, tmp2;
125
126 tmp1 = -4096UL;
127 /* FIXME: copy with reduced length. */
128 asm volatile(
129 "0: .insn ss,0xc80000000000,0(%0,%1),0(%2),0\n"
130 " jz 2f\n"
131 "1:"ALR" %0,%3\n"
132 " "SLR" %1,%3\n"
133 " "SLR" %2,%3\n"
134 " j 0b\n"
135 "2:"SLR" %0,%0\n"
136 "3: \n"
137 EX_TABLE(0b,3b)
138 : "+a" (size), "+a" (to), "+a" (from), "+a" (tmp1), "=a" (tmp2)
139 : "d" (reg0) : "cc", "memory");
140 return size;
141}
142
143unsigned long __copy_in_user(void __user *to, const void __user *from, unsigned long n)
144{
145 if (static_key_true(&have_mvcos))
146 return copy_in_user_mvcos(to, from, n);
147 return copy_in_user_pt(to, from, n);
148}
149EXPORT_SYMBOL(__copy_in_user);
150
151static inline unsigned long clear_user_mvcos(void __user *to, unsigned long size)
152{
153 register unsigned long reg0 asm("0") = 0x810000UL;
154 unsigned long tmp1, tmp2;
155
156 tmp1 = -4096UL;
157 asm volatile(
158 "0: .insn ss,0xc80000000000,0(%0,%1),0(%4),0\n"
159 " jz 4f\n"
160 "1:"ALR" %0,%2\n"
161 " "SLR" %1,%2\n"
162 " j 0b\n"
163 "2: la %3,4095(%1)\n"/* %4 = to + 4095 */
164 " nr %3,%2\n" /* %4 = (to + 4095) & -4096 */
165 " "SLR" %3,%1\n"
166 " "CLR" %0,%3\n" /* copy crosses next page boundary? */
167 " jnh 5f\n"
168 "3: .insn ss,0xc80000000000,0(%3,%1),0(%4),0\n"
169 " "SLR" %0,%3\n"
170 " j 5f\n"
171 "4:"SLR" %0,%0\n"
172 "5: \n"
173 EX_TABLE(0b,2b) EX_TABLE(3b,5b)
174 : "+a" (size), "+a" (to), "+a" (tmp1), "=a" (tmp2)
175 : "a" (empty_zero_page), "d" (reg0) : "cc", "memory");
176 return size;
177}
178
179unsigned long __clear_user(void __user *to, unsigned long size)
180{
181 if (static_key_true(&have_mvcos))
182 return clear_user_mvcos(to, size);
183 return clear_user_pt(to, size);
184}
185EXPORT_SYMBOL(__clear_user);
186
187static inline unsigned long strnlen_user_mvcos(const char __user *src,
188 unsigned long count)
189{
190 unsigned long done, len, offset, len_str;
191 char buf[256];
192
193 done = 0;
194 do {
195 offset = (unsigned long)src & ~PAGE_MASK;
196 len = min(256UL, PAGE_SIZE - offset);
197 len = min(count - done, len);
198 if (copy_from_user_mvcos(buf, src, len))
199 return 0;
200 len_str = strnlen(buf, len);
201 done += len_str;
202 src += len_str;
203 } while ((len_str == len) && (done < count));
204 return done + 1;
205}
206
207unsigned long __strnlen_user(const char __user *src, unsigned long count)
208{
209 if (static_key_true(&have_mvcos))
210 return strnlen_user_mvcos(src, count);
211 return strnlen_user_pt(src, count);
212}
213EXPORT_SYMBOL(__strnlen_user);
214
215static inline long strncpy_from_user_mvcos(char *dst, const char __user *src,
216 long count)
217{
218 unsigned long done, len, offset, len_str;
219
220 if (unlikely(count <= 0))
221 return 0;
222 done = 0;
223 do {
224 offset = (unsigned long)src & ~PAGE_MASK;
225 len = min(count - done, PAGE_SIZE - offset);
226 if (copy_from_user_mvcos(dst, src, len))
227 return -EFAULT;
228 len_str = strnlen(dst, len);
229 done += len_str;
230 src += len_str;
231 dst += len_str;
232 } while ((len_str == len) && (done < count));
233 return done;
234}
235
236long __strncpy_from_user(char *dst, const char __user *src, long count)
237{
238 if (static_key_true(&have_mvcos))
239 return strncpy_from_user_mvcos(dst, src, count);
240 return strncpy_from_user_pt(dst, src, count);
241}
242EXPORT_SYMBOL(__strncpy_from_user);
243
244/*
245 * The uaccess page tabe walk variant can be enforced with the "uaccesspt"
246 * kernel parameter. This is mainly for debugging purposes.
247 */
248static int force_uaccess_pt __initdata;
249
250static int __init parse_uaccess_pt(char *__unused)
251{
252 force_uaccess_pt = 1;
253 return 0;
254}
255early_param("uaccesspt", parse_uaccess_pt);
256
257static int __init uaccess_init(void)
258{
259 if (IS_ENABLED(CONFIG_32BIT) || force_uaccess_pt || !test_facility(27))
260 static_key_slow_dec(&have_mvcos);
261 return 0;
262}
263early_initcall(uaccess_init);
diff --git a/arch/s390/lib/uaccess_pt.c b/arch/s390/lib/uaccess_pt.c
deleted file mode 100644
index 8d39760bae68..000000000000
--- a/arch/s390/lib/uaccess_pt.c
+++ /dev/null
@@ -1,471 +0,0 @@
1/*
2 * User access functions based on page table walks for enhanced
3 * system layout without hardware support.
4 *
5 * Copyright IBM Corp. 2006, 2012
6 * Author(s): Gerald Schaefer (gerald.schaefer@de.ibm.com)
7 */
8
9#include <linux/errno.h>
10#include <linux/hardirq.h>
11#include <linux/mm.h>
12#include <linux/hugetlb.h>
13#include <asm/uaccess.h>
14#include <asm/futex.h>
15#include "uaccess.h"
16
17#ifndef CONFIG_64BIT
18#define AHI "ahi"
19#define SLR "slr"
20#else
21#define AHI "aghi"
22#define SLR "slgr"
23#endif
24
25static unsigned long strnlen_kernel(const char __user *src, unsigned long count)
26{
27 register unsigned long reg0 asm("0") = 0UL;
28 unsigned long tmp1, tmp2;
29
30 asm volatile(
31 " la %2,0(%1)\n"
32 " la %3,0(%0,%1)\n"
33 " "SLR" %0,%0\n"
34 "0: srst %3,%2\n"
35 " jo 0b\n"
36 " la %0,1(%3)\n" /* strnlen_kernel results includes \0 */
37 " "SLR" %0,%1\n"
38 "1:\n"
39 EX_TABLE(0b,1b)
40 : "+a" (count), "+a" (src), "=a" (tmp1), "=a" (tmp2)
41 : "d" (reg0) : "cc", "memory");
42 return count;
43}
44
45static unsigned long copy_in_kernel(void __user *to, const void __user *from,
46 unsigned long count)
47{
48 unsigned long tmp1;
49
50 asm volatile(
51 " "AHI" %0,-1\n"
52 " jo 5f\n"
53 " bras %3,3f\n"
54 "0:"AHI" %0,257\n"
55 "1: mvc 0(1,%1),0(%2)\n"
56 " la %1,1(%1)\n"
57 " la %2,1(%2)\n"
58 " "AHI" %0,-1\n"
59 " jnz 1b\n"
60 " j 5f\n"
61 "2: mvc 0(256,%1),0(%2)\n"
62 " la %1,256(%1)\n"
63 " la %2,256(%2)\n"
64 "3:"AHI" %0,-256\n"
65 " jnm 2b\n"
66 "4: ex %0,1b-0b(%3)\n"
67 "5:"SLR" %0,%0\n"
68 "6:\n"
69 EX_TABLE(1b,6b) EX_TABLE(2b,0b) EX_TABLE(4b,0b)
70 : "+a" (count), "+a" (to), "+a" (from), "=a" (tmp1)
71 : : "cc", "memory");
72 return count;
73}
74
75/*
76 * Returns kernel address for user virtual address. If the returned address is
77 * >= -4095 (IS_ERR_VALUE(x) returns true), a fault has occurred and the
78 * address contains the (negative) exception code.
79 */
80#ifdef CONFIG_64BIT
81
82static unsigned long follow_table(struct mm_struct *mm,
83 unsigned long address, int write)
84{
85 unsigned long *table = (unsigned long *)__pa(mm->pgd);
86
87 if (unlikely(address > mm->context.asce_limit - 1))
88 return -0x38UL;
89 switch (mm->context.asce_bits & _ASCE_TYPE_MASK) {
90 case _ASCE_TYPE_REGION1:
91 table = table + ((address >> 53) & 0x7ff);
92 if (unlikely(*table & _REGION_ENTRY_INVALID))
93 return -0x39UL;
94 table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
95 /* fallthrough */
96 case _ASCE_TYPE_REGION2:
97 table = table + ((address >> 42) & 0x7ff);
98 if (unlikely(*table & _REGION_ENTRY_INVALID))
99 return -0x3aUL;
100 table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
101 /* fallthrough */
102 case _ASCE_TYPE_REGION3:
103 table = table + ((address >> 31) & 0x7ff);
104 if (unlikely(*table & _REGION_ENTRY_INVALID))
105 return -0x3bUL;
106 table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
107 /* fallthrough */
108 case _ASCE_TYPE_SEGMENT:
109 table = table + ((address >> 20) & 0x7ff);
110 if (unlikely(*table & _SEGMENT_ENTRY_INVALID))
111 return -0x10UL;
112 if (unlikely(*table & _SEGMENT_ENTRY_LARGE)) {
113 if (write && (*table & _SEGMENT_ENTRY_PROTECT))
114 return -0x04UL;
115 return (*table & _SEGMENT_ENTRY_ORIGIN_LARGE) +
116 (address & ~_SEGMENT_ENTRY_ORIGIN_LARGE);
117 }
118 table = (unsigned long *)(*table & _SEGMENT_ENTRY_ORIGIN);
119 }
120 table = table + ((address >> 12) & 0xff);
121 if (unlikely(*table & _PAGE_INVALID))
122 return -0x11UL;
123 if (write && (*table & _PAGE_PROTECT))
124 return -0x04UL;
125 return (*table & PAGE_MASK) + (address & ~PAGE_MASK);
126}
127
128#else /* CONFIG_64BIT */
129
130static unsigned long follow_table(struct mm_struct *mm,
131 unsigned long address, int write)
132{
133 unsigned long *table = (unsigned long *)__pa(mm->pgd);
134
135 table = table + ((address >> 20) & 0x7ff);
136 if (unlikely(*table & _SEGMENT_ENTRY_INVALID))
137 return -0x10UL;
138 table = (unsigned long *)(*table & _SEGMENT_ENTRY_ORIGIN);
139 table = table + ((address >> 12) & 0xff);
140 if (unlikely(*table & _PAGE_INVALID))
141 return -0x11UL;
142 if (write && (*table & _PAGE_PROTECT))
143 return -0x04UL;
144 return (*table & PAGE_MASK) + (address & ~PAGE_MASK);
145}
146
147#endif /* CONFIG_64BIT */
148
149static inline unsigned long __user_copy_pt(unsigned long uaddr, void *kptr,
150 unsigned long n, int write_user)
151{
152 struct mm_struct *mm = current->mm;
153 unsigned long offset, done, size, kaddr;
154 void *from, *to;
155
156 if (!mm)
157 return n;
158 done = 0;
159retry:
160 spin_lock(&mm->page_table_lock);
161 do {
162 kaddr = follow_table(mm, uaddr, write_user);
163 if (IS_ERR_VALUE(kaddr))
164 goto fault;
165
166 offset = uaddr & ~PAGE_MASK;
167 size = min(n - done, PAGE_SIZE - offset);
168 if (write_user) {
169 to = (void *) kaddr;
170 from = kptr + done;
171 } else {
172 from = (void *) kaddr;
173 to = kptr + done;
174 }
175 memcpy(to, from, size);
176 done += size;
177 uaddr += size;
178 } while (done < n);
179 spin_unlock(&mm->page_table_lock);
180 return n - done;
181fault:
182 spin_unlock(&mm->page_table_lock);
183 if (__handle_fault(uaddr, -kaddr, write_user))
184 return n - done;
185 goto retry;
186}
187
188/*
189 * Do DAT for user address by page table walk, return kernel address.
190 * This function needs to be called with current->mm->page_table_lock held.
191 */
192static inline unsigned long __dat_user_addr(unsigned long uaddr, int write)
193{
194 struct mm_struct *mm = current->mm;
195 unsigned long kaddr;
196 int rc;
197
198retry:
199 kaddr = follow_table(mm, uaddr, write);
200 if (IS_ERR_VALUE(kaddr))
201 goto fault;
202
203 return kaddr;
204fault:
205 spin_unlock(&mm->page_table_lock);
206 rc = __handle_fault(uaddr, -kaddr, write);
207 spin_lock(&mm->page_table_lock);
208 if (!rc)
209 goto retry;
210 return 0;
211}
212
213unsigned long copy_from_user_pt(void *to, const void __user *from, unsigned long n)
214{
215 unsigned long rc;
216
217 if (segment_eq(get_fs(), KERNEL_DS))
218 return copy_in_kernel((void __user *) to, from, n);
219 rc = __user_copy_pt((unsigned long) from, to, n, 0);
220 if (unlikely(rc))
221 memset(to + n - rc, 0, rc);
222 return rc;
223}
224
225unsigned long copy_to_user_pt(void __user *to, const void *from, unsigned long n)
226{
227 if (segment_eq(get_fs(), KERNEL_DS))
228 return copy_in_kernel(to, (void __user *) from, n);
229 return __user_copy_pt((unsigned long) to, (void *) from, n, 1);
230}
231
232unsigned long clear_user_pt(void __user *to, unsigned long n)
233{
234 void *zpage = (void *) empty_zero_page;
235 unsigned long done, size, ret;
236
237 done = 0;
238 do {
239 if (n - done > PAGE_SIZE)
240 size = PAGE_SIZE;
241 else
242 size = n - done;
243 if (segment_eq(get_fs(), KERNEL_DS))
244 ret = copy_in_kernel(to, (void __user *) zpage, n);
245 else
246 ret = __user_copy_pt((unsigned long) to, zpage, size, 1);
247 done += size;
248 to += size;
249 if (ret)
250 return ret + n - done;
251 } while (done < n);
252 return 0;
253}
254
255unsigned long strnlen_user_pt(const char __user *src, unsigned long count)
256{
257 unsigned long uaddr = (unsigned long) src;
258 struct mm_struct *mm = current->mm;
259 unsigned long offset, done, len, kaddr;
260 unsigned long len_str;
261
262 if (unlikely(!count))
263 return 0;
264 if (segment_eq(get_fs(), KERNEL_DS))
265 return strnlen_kernel(src, count);
266 if (!mm)
267 return 0;
268 done = 0;
269retry:
270 spin_lock(&mm->page_table_lock);
271 do {
272 kaddr = follow_table(mm, uaddr, 0);
273 if (IS_ERR_VALUE(kaddr))
274 goto fault;
275
276 offset = uaddr & ~PAGE_MASK;
277 len = min(count - done, PAGE_SIZE - offset);
278 len_str = strnlen((char *) kaddr, len);
279 done += len_str;
280 uaddr += len_str;
281 } while ((len_str == len) && (done < count));
282 spin_unlock(&mm->page_table_lock);
283 return done + 1;
284fault:
285 spin_unlock(&mm->page_table_lock);
286 if (__handle_fault(uaddr, -kaddr, 0))
287 return 0;
288 goto retry;
289}
290
291long strncpy_from_user_pt(char *dst, const char __user *src, long count)
292{
293 unsigned long done, len, offset, len_str;
294
295 if (unlikely(count <= 0))
296 return 0;
297 done = 0;
298 do {
299 offset = (unsigned long)src & ~PAGE_MASK;
300 len = min(count - done, PAGE_SIZE - offset);
301 if (segment_eq(get_fs(), KERNEL_DS)) {
302 if (copy_in_kernel((void __user *) dst, src, len))
303 return -EFAULT;
304 } else {
305 if (__user_copy_pt((unsigned long) src, dst, len, 0))
306 return -EFAULT;
307 }
308 len_str = strnlen(dst, len);
309 done += len_str;
310 src += len_str;
311 dst += len_str;
312 } while ((len_str == len) && (done < count));
313 return done;
314}
315
316unsigned long copy_in_user_pt(void __user *to, const void __user *from,
317 unsigned long n)
318{
319 struct mm_struct *mm = current->mm;
320 unsigned long offset_max, uaddr, done, size, error_code;
321 unsigned long uaddr_from = (unsigned long) from;
322 unsigned long uaddr_to = (unsigned long) to;
323 unsigned long kaddr_to, kaddr_from;
324 int write_user;
325
326 if (segment_eq(get_fs(), KERNEL_DS))
327 return copy_in_kernel(to, from, n);
328 if (!mm)
329 return n;
330 done = 0;
331retry:
332 spin_lock(&mm->page_table_lock);
333 do {
334 write_user = 0;
335 uaddr = uaddr_from;
336 kaddr_from = follow_table(mm, uaddr_from, 0);
337 error_code = kaddr_from;
338 if (IS_ERR_VALUE(error_code))
339 goto fault;
340
341 write_user = 1;
342 uaddr = uaddr_to;
343 kaddr_to = follow_table(mm, uaddr_to, 1);
344 error_code = (unsigned long) kaddr_to;
345 if (IS_ERR_VALUE(error_code))
346 goto fault;
347
348 offset_max = max(uaddr_from & ~PAGE_MASK,
349 uaddr_to & ~PAGE_MASK);
350 size = min(n - done, PAGE_SIZE - offset_max);
351
352 memcpy((void *) kaddr_to, (void *) kaddr_from, size);
353 done += size;
354 uaddr_from += size;
355 uaddr_to += size;
356 } while (done < n);
357 spin_unlock(&mm->page_table_lock);
358 return n - done;
359fault:
360 spin_unlock(&mm->page_table_lock);
361 if (__handle_fault(uaddr, -error_code, write_user))
362 return n - done;
363 goto retry;
364}
365
366#define __futex_atomic_op(insn, ret, oldval, newval, uaddr, oparg) \
367 asm volatile("0: l %1,0(%6)\n" \
368 "1: " insn \
369 "2: cs %1,%2,0(%6)\n" \
370 "3: jl 1b\n" \
371 " lhi %0,0\n" \
372 "4:\n" \
373 EX_TABLE(0b,4b) EX_TABLE(2b,4b) EX_TABLE(3b,4b) \
374 : "=d" (ret), "=&d" (oldval), "=&d" (newval), \
375 "=m" (*uaddr) \
376 : "0" (-EFAULT), "d" (oparg), "a" (uaddr), \
377 "m" (*uaddr) : "cc" );
378
379static int __futex_atomic_op_pt(int op, u32 __user *uaddr, int oparg, int *old)
380{
381 int oldval = 0, newval, ret;
382
383 switch (op) {
384 case FUTEX_OP_SET:
385 __futex_atomic_op("lr %2,%5\n",
386 ret, oldval, newval, uaddr, oparg);
387 break;
388 case FUTEX_OP_ADD:
389 __futex_atomic_op("lr %2,%1\nar %2,%5\n",
390 ret, oldval, newval, uaddr, oparg);
391 break;
392 case FUTEX_OP_OR:
393 __futex_atomic_op("lr %2,%1\nor %2,%5\n",
394 ret, oldval, newval, uaddr, oparg);
395 break;
396 case FUTEX_OP_ANDN:
397 __futex_atomic_op("lr %2,%1\nnr %2,%5\n",
398 ret, oldval, newval, uaddr, oparg);
399 break;
400 case FUTEX_OP_XOR:
401 __futex_atomic_op("lr %2,%1\nxr %2,%5\n",
402 ret, oldval, newval, uaddr, oparg);
403 break;
404 default:
405 ret = -ENOSYS;
406 }
407 if (ret == 0)
408 *old = oldval;
409 return ret;
410}
411
412int __futex_atomic_op_inuser(int op, u32 __user *uaddr, int oparg, int *old)
413{
414 int ret;
415
416 if (segment_eq(get_fs(), KERNEL_DS))
417 return __futex_atomic_op_pt(op, uaddr, oparg, old);
418 if (unlikely(!current->mm))
419 return -EFAULT;
420 spin_lock(&current->mm->page_table_lock);
421 uaddr = (u32 __force __user *)
422 __dat_user_addr((__force unsigned long) uaddr, 1);
423 if (!uaddr) {
424 spin_unlock(&current->mm->page_table_lock);
425 return -EFAULT;
426 }
427 get_page(virt_to_page(uaddr));
428 spin_unlock(&current->mm->page_table_lock);
429 ret = __futex_atomic_op_pt(op, uaddr, oparg, old);
430 put_page(virt_to_page(uaddr));
431 return ret;
432}
433
434static int __futex_atomic_cmpxchg_pt(u32 *uval, u32 __user *uaddr,
435 u32 oldval, u32 newval)
436{
437 int ret;
438
439 asm volatile("0: cs %1,%4,0(%5)\n"
440 "1: la %0,0\n"
441 "2:\n"
442 EX_TABLE(0b,2b) EX_TABLE(1b,2b)
443 : "=d" (ret), "+d" (oldval), "=m" (*uaddr)
444 : "0" (-EFAULT), "d" (newval), "a" (uaddr), "m" (*uaddr)
445 : "cc", "memory" );
446 *uval = oldval;
447 return ret;
448}
449
450int futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
451 u32 oldval, u32 newval)
452{
453 int ret;
454
455 if (segment_eq(get_fs(), KERNEL_DS))
456 return __futex_atomic_cmpxchg_pt(uval, uaddr, oldval, newval);
457 if (unlikely(!current->mm))
458 return -EFAULT;
459 spin_lock(&current->mm->page_table_lock);
460 uaddr = (u32 __force __user *)
461 __dat_user_addr((__force unsigned long) uaddr, 1);
462 if (!uaddr) {
463 spin_unlock(&current->mm->page_table_lock);
464 return -EFAULT;
465 }
466 get_page(virt_to_page(uaddr));
467 spin_unlock(&current->mm->page_table_lock);
468 ret = __futex_atomic_cmpxchg_pt(uval, uaddr, oldval, newval);
469 put_page(virt_to_page(uaddr));
470 return ret;
471}