path: root/include/asm-s390/uaccess.h
author		Gerald Schaefer <geraldsc@de.ibm.com>	2006-09-20 09:59:42 -0400
committer	Martin Schwidefsky <schwidefsky@de.ibm.com>	2006-09-20 09:59:42 -0400
commit		d02765d1af743567398eb6d523dea0ba5e5e7e8e (patch)
tree		9a39c21d9924a8d81ce85254cd3d013dbe50d23e /include/asm-s390/uaccess.h
parent		6837a8c352efcc5efc70424e9bfd94ff9bfa9a47 (diff)
[S390] Make user-copy operations run-time configurable.

Introduces a struct uaccess_ops which allows setting user-copy operations
at run-time.

Signed-off-by: Gerald Schaefer <geraldsc@de.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
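The patch replaces fixed inline-assembly helpers with indirect calls through
a single global ops table (struct uaccess_ops uaccess), with uaccess_std as
the standard implementation. This trades a direct call for one pointer
indirection, but lets the kernel install the best copy routines for the
machine once, at run-time, instead of compiling in a single variant. As a
rough illustration, here is a minimal userspace sketch of that pattern,
reduced to one operation; the selection code is not part of this header
diff, so the setup in main() below is an assumption for illustration only:

	#include <stddef.h>
	#include <stdio.h>
	#include <string.h>

	/* Reduced to a single operation; the real struct has ten members. */
	struct uaccess_ops {
		size_t (*copy_from_user)(size_t n, const void *from, void *to);
	};

	/* Standard implementation, analogous to uaccess_std in the patch. */
	static size_t std_copy_from_user(size_t n, const void *from, void *to)
	{
		memcpy(to, from, n);	/* kernel code would handle faults */
		return 0;		/* 0 = no bytes left uncopied */
	}

	static struct uaccess_ops uaccess_std = {
		.copy_from_user = std_copy_from_user,
	};

	/* The one global instance that every caller dispatches through. */
	static struct uaccess_ops uaccess;

	int main(void)
	{
		char src[] = "hello", dst[sizeof(src)];

		/* Run-time selection: arch setup code would pick the best
		 * implementation for the machine and install it here. */
		uaccess = uaccess_std;

		if (uaccess.copy_from_user(sizeof(src), src, dst) == 0)
			printf("copied: %s\n", dst);
		return 0;
	}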
Diffstat (limited to 'include/asm-s390/uaccess.h')
-rw-r--r--	include/asm-s390/uaccess.h	171
1 file changed, 56 insertions(+), 115 deletions(-)
diff --git a/include/asm-s390/uaccess.h b/include/asm-s390/uaccess.h
index 0b7c0ca4c3d7..39a2716ae188 100644
--- a/include/asm-s390/uaccess.h
+++ b/include/asm-s390/uaccess.h
@@ -47,7 +47,7 @@
 		S390_lowcore.user_asce : S390_lowcore.kernel_asce;	\
 	asm volatile ("lctlg 7,7,%0" : : "m" (__pto) );		\
 })
-#else
+#else /* __s390x__ */
 #define set_fs(x) \
 ({ \
 	unsigned long __pto; \
@@ -56,7 +56,7 @@
 		S390_lowcore.user_asce : S390_lowcore.kernel_asce;	\
 	asm volatile ("lctl 7,7,%0" : : "m" (__pto) );			\
 })
-#endif
+#endif /* __s390x__ */
 
 #define segment_eq(a,b) ((a).ar4 == (b).ar4)
 
@@ -85,76 +85,50 @@ struct exception_table_entry
 	unsigned long insn, fixup;
 };
 
-#ifndef __s390x__
-#define __uaccess_fixup \
-	".section .fixup,\"ax\"\n"	\
-	"2: lhi    %0,%4\n"		\
-	"   bras   1,3f\n"		\
-	"   .long  1b\n"		\
-	"3: l      1,0(1)\n"		\
-	"   br     1\n"			\
-	".previous\n"			\
-	".section __ex_table,\"a\"\n"	\
-	"   .align 4\n"			\
-	"   .long  0b,2b\n"		\
-	".previous"
-#define __uaccess_clobber "cc", "1"
-#else /* __s390x__ */
-#define __uaccess_fixup \
-	".section .fixup,\"ax\"\n"	\
-	"2: lghi   %0,%4\n"		\
-	"   jg     1b\n"		\
-	".previous\n"			\
-	".section __ex_table,\"a\"\n"	\
-	"   .align 8\n"			\
-	"   .quad  0b,2b\n"		\
-	".previous"
-#define __uaccess_clobber "cc"
-#endif /* __s390x__ */
+struct uaccess_ops {
+	size_t (*copy_from_user)(size_t, const void __user *, void *);
+	size_t (*copy_from_user_small)(size_t, const void __user *, void *);
+	size_t (*copy_to_user)(size_t, void __user *, const void *);
+	size_t (*copy_to_user_small)(size_t, void __user *, const void *);
+	size_t (*copy_in_user)(size_t, void __user *, const void __user *);
+	size_t (*clear_user)(size_t, void __user *);
+	size_t (*strnlen_user)(size_t, const char __user *);
+	size_t (*strncpy_from_user)(size_t, const char __user *, char *);
+	int (*futex_atomic_op)(int op, int __user *, int oparg, int *old);
+	int (*futex_atomic_cmpxchg)(int __user *, int old, int new);
+};
+
+extern struct uaccess_ops uaccess;
+extern struct uaccess_ops uaccess_std;
+
+static inline int __put_user_fn(size_t size, void __user *ptr, void *x)
+{
+	size = uaccess.copy_to_user_small(size, ptr, x);
+	return size ? -EFAULT : size;
+}
+
+static inline int __get_user_fn(size_t size, const void __user *ptr, void *x)
+{
+	size = uaccess.copy_from_user_small(size, ptr, x);
+	return size ? -EFAULT : size;
+}
 
 /*
  * These are the main single-value transfer routines.  They automatically
  * use the right size if we just have the right pointer type.
  */
-#if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ > 2)
-#define __put_user_asm(x, ptr, err) \
-({ \
-	err = 0; \
-	asm volatile( \
-		"0: mvcs  0(%1,%2),%3,%0\n" \
-		"1:\n" \
-		__uaccess_fixup \
-		: "+&d" (err) \
-		: "d" (sizeof(*(ptr))), "a" (ptr), "Q" (x), \
-		  "K" (-EFAULT) \
-		: __uaccess_clobber ); \
-})
-#else
-#define __put_user_asm(x, ptr, err) \
-({ \
-	err = 0; \
-	asm volatile( \
-		"0: mvcs  0(%1,%2),0(%3),%0\n" \
-		"1:\n" \
-		__uaccess_fixup \
-		: "+&d" (err) \
-		: "d" (sizeof(*(ptr))), "a" (ptr), "a" (&(x)), \
-		  "K" (-EFAULT), "m" (x) \
-		: __uaccess_clobber ); \
-})
-#endif
-
 #define __put_user(x, ptr) \
 ({ \
 	__typeof__(*(ptr)) __x = (x); \
-	int __pu_err; \
+	int __pu_err = -EFAULT; \
 	__chk_user_ptr(ptr); \
 	switch (sizeof (*(ptr))) { \
 	case 1: \
 	case 2: \
 	case 4: \
 	case 8: \
-		__put_user_asm(__x, ptr, __pu_err); \
+		__pu_err = __put_user_fn(sizeof (*(ptr)), \
+					 ptr, &__x); \
 		break; \
 	default: \
 		__put_user_bad(); \
@@ -172,60 +146,36 @@ struct exception_table_entry
 
 extern int __put_user_bad(void) __attribute__((noreturn));
 
-#if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ > 2)
-#define __get_user_asm(x, ptr, err) \
-({ \
-	err = 0; \
-	asm volatile ( \
-		"0: mvcp  %O1(%2,%R1),0(%3),%0\n" \
-		"1:\n" \
-		__uaccess_fixup \
-		: "+&d" (err), "=Q" (x) \
-		: "d" (sizeof(*(ptr))), "a" (ptr), \
-		  "K" (-EFAULT) \
-		: __uaccess_clobber ); \
-})
-#else
-#define __get_user_asm(x, ptr, err) \
-({ \
-	err = 0; \
-	asm volatile ( \
-		"0: mvcp  0(%2,%5),0(%3),%0\n" \
-		"1:\n" \
-		__uaccess_fixup \
-		: "+&d" (err), "=m" (x) \
-		: "d" (sizeof(*(ptr))), "a" (ptr), \
-		  "K" (-EFAULT), "a" (&(x)) \
-		: __uaccess_clobber ); \
-})
-#endif
-
 #define __get_user(x, ptr) \
 ({ \
-	int __gu_err; \
+	int __gu_err = -EFAULT; \
 	__chk_user_ptr(ptr); \
 	switch (sizeof(*(ptr))) { \
 	case 1: { \
 		unsigned char __x; \
-		__get_user_asm(__x, ptr, __gu_err); \
+		__gu_err = __get_user_fn(sizeof (*(ptr)), \
+					 ptr, &__x); \
 		(x) = *(__force __typeof__(*(ptr)) *) &__x; \
 		break; \
 	}; \
 	case 2: { \
 		unsigned short __x; \
-		__get_user_asm(__x, ptr, __gu_err); \
+		__gu_err = __get_user_fn(sizeof (*(ptr)), \
+					 ptr, &__x); \
 		(x) = *(__force __typeof__(*(ptr)) *) &__x; \
 		break; \
 	}; \
	case 4: { \
 		unsigned int __x; \
-		__get_user_asm(__x, ptr, __gu_err); \
+		__gu_err = __get_user_fn(sizeof (*(ptr)), \
+					 ptr, &__x); \
 		(x) = *(__force __typeof__(*(ptr)) *) &__x; \
 		break; \
 	}; \
 	case 8: { \
 		unsigned long long __x; \
-		__get_user_asm(__x, ptr, __gu_err); \
+		__gu_err = __get_user_fn(sizeof (*(ptr)), \
+					 ptr, &__x); \
 		(x) = *(__force __typeof__(*(ptr)) *) &__x; \
 		break; \
 	}; \
@@ -247,8 +197,6 @@ extern int __get_user_bad(void) __attribute__((noreturn));
 #define __put_user_unaligned __put_user
 #define __get_user_unaligned __get_user
 
-extern long __copy_to_user_asm(const void *from, long n, void __user *to);
-
 /**
  * __copy_to_user: - Copy a block of data into user space, with less checking.
  * @to:   Destination address, in user space.
@@ -266,7 +214,10 @@ extern long __copy_to_user_asm(const void *from, long n, void __user *to);
 static inline unsigned long
 __copy_to_user(void __user *to, const void *from, unsigned long n)
 {
-	return __copy_to_user_asm(from, n, to);
+	if (__builtin_constant_p(n) && (n <= 256))
+		return uaccess.copy_to_user_small(n, to, from);
+	else
+		return uaccess.copy_to_user(n, to, from);
 }
 
 #define __copy_to_user_inatomic __copy_to_user
@@ -294,8 +245,6 @@ copy_to_user(void __user *to, const void *from, unsigned long n)
 	return n;
 }
 
-extern long __copy_from_user_asm(void *to, long n, const void __user *from);
-
 /**
  * __copy_from_user: - Copy a block of data from user space, with less checking.
  * @to:   Destination address, in kernel space.
@@ -316,7 +265,10 @@ extern long __copy_from_user_asm(void *to, long n, const void __user *from);
 static inline unsigned long
 __copy_from_user(void *to, const void __user *from, unsigned long n)
 {
-	return __copy_from_user_asm(to, n, from);
+	if (__builtin_constant_p(n) && (n <= 256))
+		return uaccess.copy_from_user_small(n, from, to);
+	else
+		return uaccess.copy_from_user(n, from, to);
 }
 
 /**
@@ -346,13 +298,10 @@ copy_from_user(void *to, const void __user *from, unsigned long n)
 	return n;
 }
 
-extern unsigned long __copy_in_user_asm(const void __user *from, long n,
-					void __user *to);
-
 static inline unsigned long
 __copy_in_user(void __user *to, const void __user *from, unsigned long n)
 {
-	return __copy_in_user_asm(from, n, to);
+	return uaccess.copy_in_user(n, to, from);
 }
 
 static inline unsigned long
@@ -360,34 +309,28 @@ copy_in_user(void __user *to, const void __user *from, unsigned long n)
 {
 	might_sleep();
 	if (__access_ok(from,n) && __access_ok(to,n))
-		n = __copy_in_user_asm(from, n, to);
+		n = __copy_in_user(to, from, n);
 	return n;
 }
 
 /*
  * Copy a null terminated string from userspace.
  */
-extern long __strncpy_from_user_asm(long count, char *dst,
-				    const char __user *src);
-
 static inline long
 strncpy_from_user(char *dst, const char __user *src, long count)
 {
 	long res = -EFAULT;
 	might_sleep();
 	if (access_ok(VERIFY_READ, src, 1))
-		res = __strncpy_from_user_asm(count, dst, src);
+		res = uaccess.strncpy_from_user(count, src, dst);
 	return res;
 }
 
-
-extern long __strnlen_user_asm(long count, const char __user *src);
-
 static inline unsigned long
 strnlen_user(const char __user * src, unsigned long n)
 {
 	might_sleep();
-	return __strnlen_user_asm(n, src);
+	return uaccess.strnlen_user(n, src);
 }
 
 /**
@@ -410,12 +353,10 @@ strnlen_user(const char __user * src, unsigned long n)
  * Zero Userspace
  */
 
-extern long __clear_user_asm(void __user *to, long n);
-
 static inline unsigned long
 __clear_user(void __user *to, unsigned long n)
 {
-	return __clear_user_asm(to, n);
+	return uaccess.clear_user(n, to);
 }
 
 static inline unsigned long
@@ -423,7 +364,7 @@ clear_user(void __user *to, unsigned long n)
 {
 	might_sleep();
 	if (access_ok(VERIFY_WRITE, to, n))
-		n = __clear_user_asm(to, n);
+		n = uaccess.clear_user(n, to);
 	return n;
 }
 
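The rewritten __copy_to_user()/__copy_from_user() wrappers use
__builtin_constant_p() to route copies whose size is a compile-time constant
of at most 256 bytes to the *_small variant, while all other copies take the
general routine. A minimal userspace sketch of that dispatch follows;
copy_small/copy_large are hypothetical stand-ins, and note that
__builtin_constant_p() on an inline function's argument generally only
evaluates to true once the compiler inlines and constant-folds the call,
i.e. with optimization enabled:

	#include <stdio.h>

	static unsigned long copy_small(unsigned long n)
	{
		printf("small copy, %lu bytes\n", n);
		return 0;
	}

	static unsigned long copy_large(unsigned long n)
	{
		printf("large copy, %lu bytes\n", n);
		return 0;
	}

	/* Mirrors the wrapper above: sizes known at compile time and not
	 * larger than 256 bytes take the short-copy path, everything else
	 * goes through the general routine. */
	static inline unsigned long do_copy(unsigned long n)
	{
		if (__builtin_constant_p(n) && (n <= 256))
			return copy_small(n);
		else
			return copy_large(n);
	}

	int main(int argc, char **argv)
	{
		unsigned long runtime_n = (unsigned long)argc * 16;

		(void)argv;
		do_copy(8);		/* constant: resolves to copy_small    */
		do_copy(runtime_n);	/* unknown at compile time: copy_large */
		return 0;
	}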