author	Heiko Carstens <heiko.carstens@de.ibm.com>	2014-01-23 05:18:36 -0500
committer	Martin Schwidefsky <schwidefsky@de.ibm.com>	2014-02-21 02:50:14 -0500
commit	4f41c2b4567dbfb7ff93e5c552b869e2865bcd9d (patch)
tree	5845eccdb038c5e1bc90a0aaf8919f2a65fa8096
parent	cfa785e623577cdad2aa721acb23bd3a95eced9a (diff)
s390/uaccess: get rid of indirect function calls
There are only two uaccess variants on s390 left: the version that is used
if the mvcos instruction is available, and the page table walk variant.

So there is no need for expensive indirect function calls. By default the
mvcos variant will be called. If the mvcos instruction is not available it
will call the page table walk variant.

For minimal performance impact the "if (mvcos_is_available)" is implemented
with a jump label, which will be a six byte nop on machines with mvcos.

Signed-off-by: Heiko Carstens <heiko.carstens@de.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
-rw-r--r--	arch/s390/include/asm/futex.h	13
-rw-r--r--	arch/s390/include/asm/uaccess.h	148
-rw-r--r--	arch/s390/kernel/setup.c	9
-rw-r--r--	arch/s390/lib/Makefile	3
-rw-r--r--	arch/s390/lib/uaccess.h	8
-rw-r--r--	arch/s390/lib/uaccess_mvcos.c	89
-rw-r--r--	arch/s390/lib/uaccess_pt.c	31
7 files changed, 152 insertions(+), 149 deletions(-)
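
The mechanism the commit message describes is simple: instead of reaching every user-copy primitive through a table of function pointers (struct uaccess_ops), each primitive becomes a real exported function that picks either the mvcos or the page-table-walk helper behind a jump label, so the test collapses to a six byte nop on machines that have mvcos. The stand-alone user-space sketch below contrasts the two call shapes; it is illustrative only — a plain bool stands in for the static key, memcpy stands in for the real copy routines, and none of the names are taken from the patch.

/*
 * User-space analogue of the change (illustrative only, not kernel code).
 * Old scheme: every copy goes through a function pointer in an ops struct.
 * New scheme: a direct call guarded by a flag set once at startup;
 * in the kernel that flag is a jump label, so the test is patched to a nop.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>
#include <string.h>

/* Stand-ins for the mvcos-based and page-table-walk copy routines. */
static size_t copy_mvcos(void *to, const void *from, size_t n)
{
	memcpy(to, from, n);
	return 0;			/* 0 bytes left uncopied */
}

static size_t copy_pt(void *to, const void *from, size_t n)
{
	memcpy(to, from, n);
	return 0;
}

/* Old scheme: indirect call through an ops table chosen at setup time. */
struct copy_ops {
	size_t (*copy_from_user)(void *, const void *, size_t);
};
static struct copy_ops ops = { .copy_from_user = copy_pt };

/* New scheme: direct call; have_mvcos plays the role of the static key. */
static bool have_mvcos = true;

static size_t copy_from_user_direct(void *to, const void *from, size_t n)
{
	if (have_mvcos)
		return copy_mvcos(to, from, n);
	return copy_pt(to, from, n);
}

int main(void)
{
	char src[] = "hello", dst[sizeof(src)];

	ops.copy_from_user(dst, src, sizeof(src));	/* indirect call */
	copy_from_user_direct(dst, src, sizeof(src));	/* direct call */
	printf("%s\n", dst);
	return 0;
}

In the real patch the early initcall flips the key once at boot (static_key_slow_dec() when MVCOS is absent), after which the hot path is a straight-line call into the mvcos routine with neither a taken branch nor an indirect call.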
diff --git a/arch/s390/include/asm/futex.h b/arch/s390/include/asm/futex.h
index 51bcaa0fdeef..fda46bd38c99 100644
--- a/arch/s390/include/asm/futex.h
+++ b/arch/s390/include/asm/futex.h
@@ -5,7 +5,10 @@
 #include <linux/uaccess.h>
 #include <asm/errno.h>
 
-static inline int futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
+int futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, u32 oldval, u32 newval);
+int __futex_atomic_op_inuser(int op, u32 __user *uaddr, int oparg, int *old);
+
+static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
 {
 	int op = (encoded_op >> 28) & 7;
 	int cmp = (encoded_op >> 24) & 15;
@@ -17,7 +20,7 @@ static inline int futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
 	oparg = 1 << oparg;
 
 	pagefault_disable();
-	ret = uaccess.futex_atomic_op(op, uaddr, oparg, &oldval);
+	ret = __futex_atomic_op_inuser(op, uaddr, oparg, &oldval);
 	pagefault_enable();
 
 	if (!ret) {
@@ -34,10 +37,4 @@ static inline int futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
 	return ret;
 }
 
-static inline int futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
-						 u32 oldval, u32 newval)
-{
-	return uaccess.futex_atomic_cmpxchg(uval, uaddr, oldval, newval);
-}
-
 #endif /* _ASM_S390_FUTEX_H */
diff --git a/arch/s390/include/asm/uaccess.h b/arch/s390/include/asm/uaccess.h
index 73199636ba98..49885a518e5e 100644
--- a/arch/s390/include/asm/uaccess.h
+++ b/arch/s390/include/asm/uaccess.h
@@ -92,33 +92,58 @@ static inline unsigned long extable_fixup(const struct exception_table_entry *x)
 #define ARCH_HAS_SORT_EXTABLE
 #define ARCH_HAS_SEARCH_EXTABLE
 
-struct uaccess_ops {
-	size_t (*copy_from_user)(void *, const void __user *, size_t);
-	size_t (*copy_to_user)(void __user *, const void *, size_t);
-	size_t (*copy_in_user)(void __user *, const void __user *, size_t);
-	size_t (*clear_user)(void __user *, size_t);
-	size_t (*strnlen_user)(const char __user *, size_t);
-	size_t (*strncpy_from_user)(char *, const char __user *, size_t);
-	int (*futex_atomic_op)(int op, u32 __user *, int oparg, int *old);
-	int (*futex_atomic_cmpxchg)(u32 *, u32 __user *, u32 old, u32 new);
-};
+int __handle_fault(unsigned long, unsigned long, int);
 
-extern struct uaccess_ops uaccess;
-extern struct uaccess_ops uaccess_mvcos;
-extern struct uaccess_ops uaccess_pt;
+/**
+ * __copy_from_user: - Copy a block of data from user space, with less checking.
+ * @to: Destination address, in kernel space.
+ * @from: Source address, in user space.
+ * @n: Number of bytes to copy.
+ *
+ * Context: User context only. This function may sleep.
+ *
+ * Copy data from user space to kernel space. Caller must check
+ * the specified block with access_ok() before calling this function.
+ *
+ * Returns number of bytes that could not be copied.
+ * On success, this will be zero.
+ *
+ * If some data could not be copied, this function will pad the copied
+ * data to the requested size using zero bytes.
+ */
+size_t __must_check __copy_from_user(void *to, const void __user *from,
+				     size_t n);
+
+/**
+ * __copy_to_user: - Copy a block of data into user space, with less checking.
+ * @to: Destination address, in user space.
+ * @from: Source address, in kernel space.
+ * @n: Number of bytes to copy.
+ *
+ * Context: User context only. This function may sleep.
+ *
+ * Copy data from kernel space to user space. Caller must check
+ * the specified block with access_ok() before calling this function.
+ *
+ * Returns number of bytes that could not be copied.
+ * On success, this will be zero.
+ */
+unsigned long __must_check __copy_to_user(void __user *to, const void *from,
+					  unsigned long n);
 
-extern int __handle_fault(unsigned long, unsigned long, int);
+#define __copy_to_user_inatomic __copy_to_user
+#define __copy_from_user_inatomic __copy_from_user
 
 static inline int __put_user_fn(void *x, void __user *ptr, size_t size)
 {
-	size = uaccess.copy_to_user(ptr, x, size);
-	return size ? -EFAULT : size;
+	size = __copy_to_user(ptr, x, size);
+	return size ? -EFAULT : 0;
 }
 
 static inline int __get_user_fn(void *x, const void __user *ptr, size_t size)
 {
-	size = uaccess.copy_from_user(x, ptr, size);
-	return size ? -EFAULT : size;
+	size = __copy_from_user(x, ptr, size);
+	return size ? -EFAULT : 0;
 }
 
 /*
@@ -152,7 +177,7 @@ static inline int __get_user_fn(void *x, const void __user *ptr, size_t size)
 })
 
 
-extern int __put_user_bad(void) __attribute__((noreturn));
+int __put_user_bad(void) __attribute__((noreturn));
 
 #define __get_user(x, ptr) \
 ({ \
@@ -200,35 +225,12 @@ extern int __put_user_bad(void) __attribute__((noreturn));
 	__get_user(x, ptr); \
 })
 
-extern int __get_user_bad(void) __attribute__((noreturn));
+int __get_user_bad(void) __attribute__((noreturn));
 
 #define __put_user_unaligned __put_user
 #define __get_user_unaligned __get_user
 
 /**
- * __copy_to_user: - Copy a block of data into user space, with less checking.
- * @to: Destination address, in user space.
- * @from: Source address, in kernel space.
- * @n: Number of bytes to copy.
- *
- * Context: User context only. This function may sleep.
- *
- * Copy data from kernel space to user space. Caller must check
- * the specified block with access_ok() before calling this function.
- *
- * Returns number of bytes that could not be copied.
- * On success, this will be zero.
- */
-static inline unsigned long __must_check
-__copy_to_user(void __user *to, const void *from, unsigned long n)
-{
-	return uaccess.copy_to_user(to, from, n);
-}
-
-#define __copy_to_user_inatomic __copy_to_user
-#define __copy_from_user_inatomic __copy_from_user
-
-/**
  * copy_to_user: - Copy a block of data into user space.
  * @to: Destination address, in user space.
  * @from: Source address, in kernel space.
@@ -248,30 +250,7 @@ copy_to_user(void __user *to, const void *from, unsigned long n)
 	return __copy_to_user(to, from, n);
 }
 
-/**
- * __copy_from_user: - Copy a block of data from user space, with less checking.
- * @to: Destination address, in kernel space.
- * @from: Source address, in user space.
- * @n: Number of bytes to copy.
- *
- * Context: User context only. This function may sleep.
- *
- * Copy data from user space to kernel space. Caller must check
- * the specified block with access_ok() before calling this function.
- *
- * Returns number of bytes that could not be copied.
- * On success, this will be zero.
- *
- * If some data could not be copied, this function will pad the copied
- * data to the requested size using zero bytes.
- */
-static inline unsigned long __must_check
-__copy_from_user(void *to, const void __user *from, unsigned long n)
-{
-	return uaccess.copy_from_user(to, from, n);
-}
-
-extern void copy_from_user_overflow(void)
+void copy_from_user_overflow(void)
 #ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
 __compiletime_warning("copy_from_user() buffer size is not provably correct")
 #endif
@@ -306,11 +285,8 @@ copy_from_user(void *to, const void __user *from, unsigned long n)
 	return __copy_from_user(to, from, n);
 }
 
-static inline unsigned long __must_check
-__copy_in_user(void __user *to, const void __user *from, unsigned long n)
-{
-	return uaccess.copy_in_user(to, from, n);
-}
+unsigned long __must_check
+__copy_in_user(void __user *to, const void __user *from, unsigned long n);
 
 static inline unsigned long __must_check
 copy_in_user(void __user *to, const void __user *from, unsigned long n)
@@ -322,18 +298,22 @@ copy_in_user(void __user *to, const void __user *from, unsigned long n)
 /*
  * Copy a null terminated string from userspace.
  */
+
+long __strncpy_from_user(char *dst, const char __user *src, long count);
+
 static inline long __must_check
 strncpy_from_user(char *dst, const char __user *src, long count)
 {
 	might_fault();
-	return uaccess.strncpy_from_user(dst, src, count);
+	return __strncpy_from_user(dst, src, count);
 }
 
-static inline unsigned long
-strnlen_user(const char __user * src, unsigned long n)
+size_t __must_check __strnlen_user(const char __user *src, size_t count);
+
+static inline size_t strnlen_user(const char __user *src, size_t n)
 {
 	might_fault();
-	return uaccess.strnlen_user(src, n);
+	return __strnlen_user(src, n);
 }
 
 /**
@@ -355,21 +335,15 @@ strnlen_user(const char __user * src, unsigned long n)
 /*
  * Zero Userspace
  */
+size_t __must_check __clear_user(void __user *to, size_t size);
 
-static inline unsigned long __must_check
-__clear_user(void __user *to, unsigned long n)
-{
-	return uaccess.clear_user(to, n);
-}
-
-static inline unsigned long __must_check
-clear_user(void __user *to, unsigned long n)
+static inline size_t __must_check clear_user(void __user *to, size_t n)
 {
 	might_fault();
-	return uaccess.clear_user(to, n);
+	return __clear_user(to, n);
 }
 
-extern int copy_to_user_real(void __user *dest, void *src, size_t count);
-extern int copy_from_user_real(void *dest, void __user *src, size_t count);
+int copy_to_user_real(void __user *dest, void *src, size_t count);
+int copy_from_user_real(void *dest, void __user *src, size_t count);
 
 #endif /* __S390_UACCESS_H */
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
index 09e2f468f48b..91ea00955db7 100644
--- a/arch/s390/kernel/setup.c
+++ b/arch/s390/kernel/setup.c
@@ -47,7 +47,6 @@
 #include <linux/compat.h>
 
 #include <asm/ipl.h>
-#include <asm/uaccess.h>
 #include <asm/facility.h>
 #include <asm/smp.h>
 #include <asm/mmu_context.h>
@@ -65,12 +64,6 @@
65#include "entry.h" 64#include "entry.h"
66 65
67/* 66/*
68 * User copy operations.
69 */
70struct uaccess_ops uaccess;
71EXPORT_SYMBOL(uaccess);
72
73/*
74 * Machine setup.. 67 * Machine setup..
75 */ 68 */
76unsigned int console_mode = 0; 69unsigned int console_mode = 0;
@@ -1009,8 +1002,6 @@ void __init setup_arch(char **cmdline_p)
 	init_mm.end_data = (unsigned long) &_edata;
 	init_mm.brk = (unsigned long) &_end;
 
-	uaccess = MACHINE_HAS_MVCOS ? uaccess_mvcos : uaccess_pt;
-
 	parse_early_param();
 	detect_memory_layout(memory_chunk, memory_end);
 	os_info_init();
diff --git a/arch/s390/lib/Makefile b/arch/s390/lib/Makefile
index b068729e50ac..e3fffe1dff51 100644
--- a/arch/s390/lib/Makefile
+++ b/arch/s390/lib/Makefile
@@ -2,8 +2,7 @@
 # Makefile for s390-specific library files..
 #
 
-lib-y += delay.o string.o uaccess_pt.o find.o
+lib-y += delay.o string.o uaccess_pt.o uaccess_mvcos.o find.o
 obj-$(CONFIG_32BIT) += div64.o qrnnd.o ucmpdi2.o mem32.o
 obj-$(CONFIG_64BIT) += mem64.o
-lib-$(CONFIG_64BIT) += uaccess_mvcos.o
 lib-$(CONFIG_SMP) += spinlock.o
diff --git a/arch/s390/lib/uaccess.h b/arch/s390/lib/uaccess.h
index b1a22173d027..e5b9c924b733 100644
--- a/arch/s390/lib/uaccess.h
+++ b/arch/s390/lib/uaccess.h
@@ -6,7 +6,11 @@
 #ifndef __ARCH_S390_LIB_UACCESS_H
 #define __ARCH_S390_LIB_UACCESS_H
 
-extern int futex_atomic_op_pt(int, u32 __user *, int, int *);
-extern int futex_atomic_cmpxchg_pt(u32 *, u32 __user *, u32, u32);
+size_t copy_from_user_pt(void *to, const void __user *from, size_t n);
+size_t copy_to_user_pt(void __user *to, const void *from, size_t n);
+size_t copy_in_user_pt(void __user *to, const void __user *from, size_t n);
+size_t clear_user_pt(void __user *to, size_t n);
+size_t strnlen_user_pt(const char __user *src, size_t count);
+size_t strncpy_from_user_pt(char *dst, const char __user *src, size_t count);
 
 #endif /* __ARCH_S390_LIB_UACCESS_H */
diff --git a/arch/s390/lib/uaccess_mvcos.c b/arch/s390/lib/uaccess_mvcos.c
index 95123f57aaf8..66f35e15db2d 100644
--- a/arch/s390/lib/uaccess_mvcos.c
+++ b/arch/s390/lib/uaccess_mvcos.c
@@ -6,7 +6,9 @@
  * Gerald Schaefer (gerald.schaefer@de.ibm.com)
  */
 
+#include <linux/jump_label.h>
 #include <linux/errno.h>
+#include <linux/init.h>
 #include <linux/mm.h>
 #include <asm/uaccess.h>
 #include <asm/futex.h>
@@ -26,7 +28,10 @@
 #define SLR "slgr"
 #endif
 
-static size_t copy_from_user_mvcos(void *x, const void __user *ptr, size_t size)
+static struct static_key have_mvcos = STATIC_KEY_INIT_TRUE;
+
+static inline size_t copy_from_user_mvcos(void *x, const void __user *ptr,
+					   size_t size)
 {
 	register unsigned long reg0 asm("0") = 0x81UL;
 	unsigned long tmp1, tmp2;
@@ -65,7 +70,16 @@ static size_t copy_from_user_mvcos(void *x, const void __user *ptr, size_t size)
 	return size;
 }
 
-static size_t copy_to_user_mvcos(void __user *ptr, const void *x, size_t size)
+size_t __copy_from_user(void *to, const void __user *from, size_t n)
+{
+	if (static_key_true(&have_mvcos))
+		return copy_from_user_mvcos(to, from, n);
+	return copy_from_user_pt(to, from, n);
+}
+EXPORT_SYMBOL(__copy_from_user);
+
+static inline size_t copy_to_user_mvcos(void __user *ptr, const void *x,
+					 size_t size)
 {
 	register unsigned long reg0 asm("0") = 0x810000UL;
 	unsigned long tmp1, tmp2;
@@ -94,8 +108,16 @@ static size_t copy_to_user_mvcos(void __user *ptr, const void *x, size_t size)
 	return size;
 }
 
-static size_t copy_in_user_mvcos(void __user *to, const void __user *from,
-				 size_t size)
+size_t __copy_to_user(void __user *to, const void *from, size_t n)
+{
+	if (static_key_true(&have_mvcos))
+		return copy_to_user_mvcos(to, from, n);
+	return copy_to_user_pt(to, from, n);
+}
+EXPORT_SYMBOL(__copy_to_user);
+
+static inline size_t copy_in_user_mvcos(void __user *to, const void __user *from,
+					 size_t size)
 {
 	register unsigned long reg0 asm("0") = 0x810081UL;
 	unsigned long tmp1, tmp2;
@@ -117,7 +139,15 @@ static size_t copy_in_user_mvcos(void __user *to, const void __user *from,
 	return size;
 }
 
-static size_t clear_user_mvcos(void __user *to, size_t size)
+size_t __copy_in_user(void __user *to, const void __user *from, size_t n)
+{
+	if (static_key_true(&have_mvcos))
+		return copy_in_user_mvcos(to, from, n);
+	return copy_in_user_pt(to, from, n);
+}
+EXPORT_SYMBOL(__copy_in_user);
+
+static inline size_t clear_user_mvcos(void __user *to, size_t size)
 {
 	register unsigned long reg0 asm("0") = 0x810000UL;
 	unsigned long tmp1, tmp2;
@@ -145,7 +175,15 @@ static size_t clear_user_mvcos(void __user *to, size_t size)
 	return size;
 }
 
-static size_t strnlen_user_mvcos(const char __user *src, size_t count)
+size_t __clear_user(void __user *to, size_t size)
+{
+	if (static_key_true(&have_mvcos))
+		return clear_user_mvcos(to, size);
+	return clear_user_pt(to, size);
+}
+EXPORT_SYMBOL(__clear_user);
+
+static inline size_t strnlen_user_mvcos(const char __user *src, size_t count)
 {
 	size_t done, len, offset, len_str;
 	char buf[256];
@@ -164,10 +202,18 @@ static size_t strnlen_user_mvcos(const char __user *src, size_t count)
 	return done + 1;
 }
 
-static size_t strncpy_from_user_mvcos(char *dst, const char __user *src,
-				      size_t count)
+size_t __strnlen_user(const char __user *src, size_t count)
 {
-	size_t done, len, offset, len_str;
+	if (static_key_true(&have_mvcos))
+		return strnlen_user_mvcos(src, count);
+	return strnlen_user_pt(src, count);
+}
+EXPORT_SYMBOL(__strnlen_user);
+
+static inline size_t strncpy_from_user_mvcos(char *dst, const char __user *src,
+					     size_t count)
+{
+	unsigned long done, len, offset, len_str;
 
 	if (unlikely(!count))
 		return 0;
@@ -185,13 +231,18 @@ static size_t strncpy_from_user_mvcos(char *dst, const char __user *src,
 	return done;
 }
 
-struct uaccess_ops uaccess_mvcos = {
-	.copy_from_user = copy_from_user_mvcos,
-	.copy_to_user = copy_to_user_mvcos,
-	.copy_in_user = copy_in_user_mvcos,
-	.clear_user = clear_user_mvcos,
-	.strnlen_user = strnlen_user_mvcos,
-	.strncpy_from_user = strncpy_from_user_mvcos,
-	.futex_atomic_op = futex_atomic_op_pt,
-	.futex_atomic_cmpxchg = futex_atomic_cmpxchg_pt,
-};
+long __strncpy_from_user(char *dst, const char __user *src, long count)
+{
+	if (static_key_true(&have_mvcos))
+		return strncpy_from_user_mvcos(dst, src, count);
+	return strncpy_from_user_pt(dst, src, count);
+}
+EXPORT_SYMBOL(__strncpy_from_user);
+
+static int __init uaccess_init(void)
+{
+	if (!MACHINE_HAS_MVCOS)
+		static_key_slow_dec(&have_mvcos);
+	return 0;
+}
+early_initcall(uaccess_init);
diff --git a/arch/s390/lib/uaccess_pt.c b/arch/s390/lib/uaccess_pt.c
index 2fa696b39b56..b49c3a440a24 100644
--- a/arch/s390/lib/uaccess_pt.c
+++ b/arch/s390/lib/uaccess_pt.c
@@ -211,7 +211,7 @@ fault:
 	return 0;
 }
 
-static size_t copy_from_user_pt(void *to, const void __user *from, size_t n)
+size_t copy_from_user_pt(void *to, const void __user *from, size_t n)
 {
 	size_t rc;
 
@@ -223,14 +223,14 @@ static size_t copy_from_user_pt(void *to, const void __user *from, size_t n)
 	return rc;
 }
 
-static size_t copy_to_user_pt(void __user *to, const void *from, size_t n)
+size_t copy_to_user_pt(void __user *to, const void *from, size_t n)
 {
 	if (segment_eq(get_fs(), KERNEL_DS))
 		return copy_in_kernel(to, (void __user *) from, n);
 	return __user_copy_pt((unsigned long) to, (void *) from, n, 1);
 }
 
-static size_t clear_user_pt(void __user *to, size_t n)
+size_t clear_user_pt(void __user *to, size_t n)
 {
 	void *zpage = (void *) empty_zero_page;
 	long done, size, ret;
@@ -253,7 +253,7 @@ static size_t clear_user_pt(void __user *to, size_t n)
 	return 0;
 }
 
-static size_t strnlen_user_pt(const char __user *src, size_t count)
+size_t strnlen_user_pt(const char __user *src, size_t count)
 {
 	unsigned long uaddr = (unsigned long) src;
 	struct mm_struct *mm = current->mm;
@@ -289,8 +289,7 @@ fault:
 	goto retry;
 }
 
-static size_t strncpy_from_user_pt(char *dst, const char __user *src,
-				   size_t count)
+size_t strncpy_from_user_pt(char *dst, const char __user *src, size_t count)
 {
 	size_t done, len, offset, len_str;
 
@@ -315,8 +314,7 @@ static size_t strncpy_from_user_pt(char *dst, const char __user *src,
 	return done;
 }
 
-static size_t copy_in_user_pt(void __user *to, const void __user *from,
-			      size_t n)
+size_t copy_in_user_pt(void __user *to, const void __user *from, size_t n)
 {
 	struct mm_struct *mm = current->mm;
 	unsigned long offset_max, uaddr, done, size, error_code;
@@ -411,7 +409,7 @@ static int __futex_atomic_op_pt(int op, u32 __user *uaddr, int oparg, int *old)
 	return ret;
 }
 
-int futex_atomic_op_pt(int op, u32 __user *uaddr, int oparg, int *old)
+int __futex_atomic_op_inuser(int op, u32 __user *uaddr, int oparg, int *old)
 {
 	int ret;
 
@@ -449,8 +447,8 @@ static int __futex_atomic_cmpxchg_pt(u32 *uval, u32 __user *uaddr,
 	return ret;
 }
 
-int futex_atomic_cmpxchg_pt(u32 *uval, u32 __user *uaddr,
-			    u32 oldval, u32 newval)
+int futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
+				  u32 oldval, u32 newval)
 {
 	int ret;
 
@@ -471,14 +469,3 @@ int futex_atomic_cmpxchg_pt(u32 *uval, u32 __user *uaddr,
 	put_page(virt_to_page(uaddr));
 	return ret;
 }
-
-struct uaccess_ops uaccess_pt = {
-	.copy_from_user = copy_from_user_pt,
-	.copy_to_user = copy_to_user_pt,
-	.copy_in_user = copy_in_user_pt,
-	.clear_user = clear_user_pt,
-	.strnlen_user = strnlen_user_pt,
-	.strncpy_from_user = strncpy_from_user_pt,
-	.futex_atomic_op = futex_atomic_op_pt,
-	.futex_atomic_cmpxchg = futex_atomic_cmpxchg_pt,
-};