Diffstat (limited to 'include/asm-sh/uaccess_32.h')
-rw-r--r--  include/asm-sh/uaccess_32.h | 277
1 file changed, 48 insertions(+), 229 deletions(-)
diff --git a/include/asm-sh/uaccess_32.h b/include/asm-sh/uaccess_32.h
index 44abd1682329..ae0d24f6653f 100644
--- a/include/asm-sh/uaccess_32.h
+++ b/include/asm-sh/uaccess_32.h
@@ -12,56 +12,6 @@
 #ifndef __ASM_SH_UACCESS_32_H
 #define __ASM_SH_UACCESS_32_H
 
-#include <linux/errno.h>
-#include <linux/sched.h>
-#include <asm/segment.h>
-
-#define VERIFY_READ 0
-#define VERIFY_WRITE 1
-
-#define __addr_ok(addr) \
-	((unsigned long __force)(addr) < current_thread_info()->addr_limit.seg)
-
-/*
- * __access_ok: Check if address with size is OK or not.
- *
- * Uhhuh, this needs 33-bit arithmetic. We have a carry..
- *
- * sum := addr + size;  carry? --> flag = true;
- * if (sum >= addr_limit) flag = true;
- */
-#define __access_ok(addr, size) \
-	(__addr_ok((addr) + (size)))
-#define access_ok(type, addr, size) \
-	(__chk_user_ptr(addr), \
-	 __access_ok((unsigned long __force)(addr), (size)))
-
-/*
- * Uh, these should become the main single-value transfer routines ...
- * They automatically use the right size if we just have the right
- * pointer type ...
- *
- * As SuperH uses the same address space for kernel and user data, we
- * can just do these as direct assignments.
- *
- * Careful to not
- * (a) re-use the arguments for side effects (sizeof is ok)
- * (b) require any knowledge of processes at this stage
- */
-#define put_user(x,ptr)		__put_user_check((x), (ptr), sizeof(*(ptr)))
-#define get_user(x,ptr)		__get_user_check((x), (ptr), sizeof(*(ptr)))
-
-/*
- * The "__xxx" versions do not do address space checking, useful when
- * doing multiple accesses to the same area (the user has to do the
- * checks by hand with "access_ok()")
- */
-#define __put_user(x,ptr)	__put_user_nocheck((x), (ptr), sizeof(*(ptr)))
-#define __get_user(x,ptr)	__get_user_nocheck((x), (ptr), sizeof(*(ptr)))
-
-struct __large_struct { unsigned long buf[100]; };
-#define __m(x) (*(struct __large_struct __user *)(x))
-
 #define __get_user_size(x,ptr,size,retval) \
 do { \
 	retval = 0; \
@@ -81,28 +31,7 @@ do { \
 	} \
 } while (0)
 
-#define __get_user_nocheck(x,ptr,size) \
-({ \
-	long __gu_err; \
-	unsigned long __gu_val; \
-	const __typeof__(*(ptr)) __user *__gu_addr = (ptr); \
-	__chk_user_ptr(ptr); \
-	__get_user_size(__gu_val, __gu_addr, (size), __gu_err); \
-	(x) = (__typeof__(*(ptr)))__gu_val; \
-	__gu_err; \
-})
-
-#define __get_user_check(x,ptr,size) \
-({ \
-	long __gu_err = -EFAULT; \
-	unsigned long __gu_val = 0; \
-	const __typeof__(*(ptr)) *__gu_addr = (ptr); \
-	if (likely(access_ok(VERIFY_READ, __gu_addr, (size)))) \
-		__get_user_size(__gu_val, __gu_addr, (size), __gu_err); \
-	(x) = (__typeof__(*(ptr)))__gu_val; \
-	__gu_err; \
-})
-
+#ifdef CONFIG_MMU
 #define __get_user_asm(x, addr, err, insn) \
 ({ \
 __asm__ __volatile__( \
@@ -123,6 +52,16 @@ __asm__ __volatile__( \
123 ".previous" \ 52 ".previous" \
124 :"=&r" (err), "=&r" (x) \ 53 :"=&r" (err), "=&r" (x) \
125 :"m" (__m(addr)), "i" (-EFAULT), "0" (err)); }) 54 :"m" (__m(addr)), "i" (-EFAULT), "0" (err)); })
55#else
56#define __get_user_asm(x, addr, err, insn) \
57do { \
58 __asm__ __volatile__ ( \
59 "mov." insn " %1, %0\n\t" \
60 : "=&r" (x) \
61 : "m" (__m(addr)) \
62 ); \
63} while (0)
64#endif /* CONFIG_MMU */
126 65
127extern void __get_user_unknown(void); 66extern void __get_user_unknown(void);
128 67
@@ -147,45 +86,41 @@ do { \
 	} \
 } while (0)
 
-#define __put_user_nocheck(x,ptr,size) \
-({ \
-	long __pu_err; \
-	__typeof__(*(ptr)) __user *__pu_addr = (ptr); \
-	__chk_user_ptr(ptr); \
-	__put_user_size((x), __pu_addr, (size), __pu_err); \
-	__pu_err; \
-})
-
-#define __put_user_check(x,ptr,size) \
-({ \
-	long __pu_err = -EFAULT; \
-	__typeof__(*(ptr)) __user *__pu_addr = (ptr); \
-	if (likely(access_ok(VERIFY_WRITE, __pu_addr, size))) \
-		__put_user_size((x), __pu_addr, (size), \
-				__pu_err); \
-	__pu_err; \
-})
-
-#define __put_user_asm(x, addr, err, insn) \
-({ \
-__asm__ __volatile__( \
-	"1:\n\t" \
-	"mov." insn " %1, %2\n\t" \
-	"2:\n" \
-	".section .fixup,\"ax\"\n" \
-	"3:\n\t" \
-	"mov.l 4f, %0\n\t" \
-	"jmp @%0\n\t" \
-	" mov %3, %0\n\t" \
-	".balign 4\n" \
-	"4: .long 2b\n\t" \
-	".previous\n" \
-	".section __ex_table,\"a\"\n\t" \
-	".long 1b, 3b\n\t" \
-	".previous" \
-	:"=&r" (err) \
-	:"r" (x), "m" (__m(addr)), "i" (-EFAULT), "0" (err) \
-	:"memory"); })
+#ifdef CONFIG_MMU
+#define __put_user_asm(x, addr, err, insn) \
+do { \
+	__asm__ __volatile__ ( \
+		"1:\n\t" \
+		"mov." insn " %1, %2\n\t" \
+		"2:\n" \
+		".section .fixup,\"ax\"\n" \
+		"3:\n\t" \
+		"mov.l 4f, %0\n\t" \
+		"jmp @%0\n\t" \
+		" mov %3, %0\n\t" \
+		".balign 4\n" \
+		"4: .long 2b\n\t" \
+		".previous\n" \
+		".section __ex_table,\"a\"\n\t" \
+		".long 1b, 3b\n\t" \
+		".previous" \
+		: "=&r" (err) \
+		: "r" (x), "m" (__m(addr)), "i" (-EFAULT), \
+		  "0" (err) \
+		: "memory" \
+	); \
+} while (0)
+#else
+#define __put_user_asm(x, addr, err, insn) \
+do { \
+	__asm__ __volatile__ ( \
+		"mov." insn " %0, %1\n\t" \
+		: /* no outputs */ \
+		: "r" (x), "m" (__m(addr)) \
+		: "memory" \
+	); \
+} while (0)
+#endif /* CONFIG_MMU */
 
 #if defined(CONFIG_CPU_LITTLE_ENDIAN)
 #define __put_user_u64(val,addr,retval) \
@@ -235,40 +170,7 @@ __asm__ __volatile__( \
 
 extern void __put_user_unknown(void);
 
-/* Generic arbitrary sized copy. */
-/* Return the number of bytes NOT copied */
-__kernel_size_t __copy_user(void *to, const void *from, __kernel_size_t n);
-
-
-static __always_inline unsigned long
-__copy_from_user(void *to, const void __user *from, unsigned long n)
-{
-	return __copy_user(to, (__force void *)from, n);
-}
-
-static __always_inline unsigned long __must_check
-__copy_to_user(void __user *to, const void *from, unsigned long n)
-{
-	return __copy_user((__force void *)to, from, n);
-}
-
-#define __copy_to_user_inatomic __copy_to_user
-#define __copy_from_user_inatomic __copy_from_user
-
-/*
- * Clear the area and return remaining number of bytes
- * (on failure. Usually it's 0.)
- */
-extern __kernel_size_t __clear_user(void *addr, __kernel_size_t size);
-
-#define clear_user(addr,n) ({ \
-void * __cl_addr = (addr); \
-unsigned long __cl_size = (n); \
-if (__cl_size && __access_ok(((unsigned long)(__cl_addr)), __cl_size)) \
-__cl_size = __clear_user(__cl_addr, __cl_size); \
-__cl_size; })
-
-static __inline__ int
+static inline int
 __strncpy_from_user(unsigned long __dest, unsigned long __user __src, int __count)
 {
 	__kernel_size_t res;
@@ -307,37 +209,11 @@ __strncpy_from_user(unsigned long __dest, unsigned long __user __src, int __coun
 	return res;
 }
 
-/**
- * strncpy_from_user: - Copy a NUL terminated string from userspace.
- * @dst:   Destination address, in kernel space.  This buffer must be at
- *         least @count bytes long.
- * @src:   Source address, in user space.
- * @count: Maximum number of bytes to copy, including the trailing NUL.
- *
- * Copies a NUL-terminated string from userspace to kernel space.
- *
- * On success, returns the length of the string (not including the trailing
- * NUL).
- *
- * If access to userspace fails, returns -EFAULT (some data may have been
- * copied).
- *
- * If @count is smaller than the length of the string, copies @count bytes
- * and returns @count.
- */
-#define strncpy_from_user(dest,src,count) ({ \
-unsigned long __sfu_src = (unsigned long) (src); \
-int __sfu_count = (int) (count); \
-long __sfu_res = -EFAULT; \
-if(__access_ok(__sfu_src, __sfu_count)) { \
-__sfu_res = __strncpy_from_user((unsigned long) (dest), __sfu_src, __sfu_count); \
-} __sfu_res; })
-
 /*
  * Return the size of a string (including the ending 0 even when we have
  * exceeded the maximum string length).
  */
-static __inline__ long __strnlen_user(const char __user *__s, long __n)
+static inline long __strnlen_user(const char __user *__s, long __n)
 {
 	unsigned long res;
 	unsigned long __dummy;
@@ -369,61 +245,4 @@ static __inline__ long __strnlen_user(const char __user *__s, long __n)
 	return res;
 }
 
-/**
- * strnlen_user: - Get the size of a string in user space.
- * @s: The string to measure.
- * @n: The maximum valid length
- *
- * Context: User context only. This function may sleep.
- *
- * Get the size of a NUL-terminated string in user space.
- *
- * Returns the size of the string INCLUDING the terminating NUL.
- * On exception, returns 0.
- * If the string is too long, returns a value greater than @n.
- */
-static __inline__ long strnlen_user(const char __user *s, long n)
-{
-	if (!__addr_ok(s))
-		return 0;
-	else
-		return __strnlen_user(s, n);
-}
-
-/**
- * strlen_user: - Get the size of a string in user space.
- * @str: The string to measure.
- *
- * Context: User context only. This function may sleep.
- *
- * Get the size of a NUL-terminated string in user space.
- *
- * Returns the size of the string INCLUDING the terminating NUL.
- * On exception, returns 0.
- *
- * If there is a limit on the length of a valid string, you may wish to
- * consider using strnlen_user() instead.
- */
-#define strlen_user(str) strnlen_user(str, ~0UL >> 1)
-
-/*
- * The exception table consists of pairs of addresses: the first is the
- * address of an instruction that is allowed to fault, and the second is
- * the address at which the program should continue. No registers are
- * modified, so it is entirely up to the continuation code to figure out
- * what to do.
- *
- * All the routines below use bits of fixup code that are out of line
- * with the main instruction path. This means when everything is well,
- * we don't even have to jump over them. Further, they do not intrude
- * on our cache or tlb entries.
- */
-
-struct exception_table_entry
-{
-	unsigned long insn, fixup;
-};
-
-extern int fixup_exception(struct pt_regs *regs);
-
 #endif /* __ASM_SH_UACCESS_32_H */