author     Paul Mundt <lethal@linux-sh.org>    2008-06-03 07:05:39 -0400
committer  Paul Mundt <lethal@linux-sh.org>    2008-07-28 05:10:29 -0400
commit     42fd3b142d8867f5b58d6fb75592cd20fd654c1b (patch)
tree       b05b9543e34e008f8d466bb3a464e1f9e3081817 /include
parent     31f6a11fe764dc580b645d7aa878854fa9e85a06 (diff)
sh: Initial consolidation of the _32/_64 uaccess split.
This consolidates everything but the bare assembly routines, which we
will sync up in a follow-up patch.

Signed-off-by: Paul Mundt <lethal@linux-sh.org>
Diffstat (limited to 'include')
-rw-r--r--  include/asm-sh/uaccess.h    | 222
-rw-r--r--  include/asm-sh/uaccess_32.h | 277
-rw-r--r--  include/asm-sh/uaccess_64.h | 187
3 files changed, 270 insertions(+), 416 deletions(-)
diff --git a/include/asm-sh/uaccess.h b/include/asm-sh/uaccess.h
index b3440c305b5d..45c2c9b2993d 100644
--- a/include/asm-sh/uaccess.h
+++ b/include/asm-sh/uaccess.h
@@ -1,12 +1,171 @@
 #ifndef __ASM_SH_UACCESS_H
 #define __ASM_SH_UACCESS_H
 
+#include <linux/errno.h>
+#include <linux/sched.h>
+#include <asm/segment.h>
+
+#define VERIFY_READ 0
+#define VERIFY_WRITE 1
+
+#define __addr_ok(addr) \
+	((unsigned long __force)(addr) < current_thread_info()->addr_limit.seg)
+
+/*
+ * __access_ok: Check if address with size is OK or not.
+ *
+ * Uhhuh, this needs 33-bit arithmetic. We have a carry..
+ *
+ * sum := addr + size;  carry? --> flag = true;
+ * if (sum >= addr_limit) flag = true;
+ */
+#define __access_ok(addr, size) \
+	(__addr_ok((addr) + (size)))
+#define access_ok(type, addr, size) \
+	(__chk_user_ptr(addr), \
+	 __access_ok((unsigned long __force)(addr), (size)))
+
+/*
+ * Uh, these should become the main single-value transfer routines ...
+ * They automatically use the right size if we just have the right
+ * pointer type ...
+ *
+ * As SuperH uses the same address space for kernel and user data, we
+ * can just do these as direct assignments.
+ *
+ * Careful to not
+ * (a) re-use the arguments for side effects (sizeof is ok)
+ * (b) require any knowledge of processes at this stage
+ */
+#define put_user(x,ptr)		__put_user_check((x), (ptr), sizeof(*(ptr)))
+#define get_user(x,ptr)		__get_user_check((x), (ptr), sizeof(*(ptr)))
+
+/*
+ * The "__xxx" versions do not do address space checking, useful when
+ * doing multiple accesses to the same area (the user has to do the
+ * checks by hand with "access_ok()")
+ */
+#define __put_user(x,ptr)	__put_user_nocheck((x), (ptr), sizeof(*(ptr)))
+#define __get_user(x,ptr)	__get_user_nocheck((x), (ptr), sizeof(*(ptr)))
+
+struct __large_struct { unsigned long buf[100]; };
+#define __m(x) (*(struct __large_struct __user *)(x))
+
+#define __get_user_nocheck(x,ptr,size) \
+({ \
+	long __gu_err; \
+	unsigned long __gu_val; \
+	const __typeof__(*(ptr)) __user *__gu_addr = (ptr); \
+	__chk_user_ptr(ptr); \
+	__get_user_size(__gu_val, __gu_addr, (size), __gu_err); \
+	(x) = (__typeof__(*(ptr)))__gu_val; \
+	__gu_err; \
+})
+
+#define __get_user_check(x,ptr,size) \
+({ \
+	long __gu_err = -EFAULT; \
+	unsigned long __gu_val = 0; \
+	const __typeof__(*(ptr)) *__gu_addr = (ptr); \
+	if (likely(access_ok(VERIFY_READ, __gu_addr, (size)))) \
+		__get_user_size(__gu_val, __gu_addr, (size), __gu_err); \
+	(x) = (__typeof__(*(ptr)))__gu_val; \
+	__gu_err; \
+})
+
+#define __put_user_nocheck(x,ptr,size) \
+({ \
+	long __pu_err; \
+	__typeof__(*(ptr)) __user *__pu_addr = (ptr); \
+	__chk_user_ptr(ptr); \
+	__put_user_size((x), __pu_addr, (size), __pu_err); \
+	__pu_err; \
+})
+
+#define __put_user_check(x,ptr,size) \
+({ \
+	long __pu_err = -EFAULT; \
+	__typeof__(*(ptr)) __user *__pu_addr = (ptr); \
+	if (likely(access_ok(VERIFY_WRITE, __pu_addr, size))) \
+		__put_user_size((x), __pu_addr, (size), \
+				__pu_err); \
+	__pu_err; \
+})
+
 #ifdef CONFIG_SUPERH32
 # include "uaccess_32.h"
 #else
 # include "uaccess_64.h"
 #endif
 
+/* Generic arbitrary sized copy. */
+/* Return the number of bytes NOT copied */
+__kernel_size_t __copy_user(void *to, const void *from, __kernel_size_t n);
+
+static __always_inline unsigned long
+__copy_from_user(void *to, const void __user *from, unsigned long n)
+{
+	return __copy_user(to, (__force void *)from, n);
+}
+
+static __always_inline unsigned long __must_check
+__copy_to_user(void __user *to, const void *from, unsigned long n)
+{
+	return __copy_user((__force void *)to, from, n);
+}
+
+#define __copy_to_user_inatomic __copy_to_user
+#define __copy_from_user_inatomic __copy_from_user
+
+/*
+ * Clear the area and return remaining number of bytes
+ * (on failure. Usually it's 0.)
+ */
+__kernel_size_t __clear_user(void *addr, __kernel_size_t size);
+
+#define clear_user(addr,n) \
+({ \
+	void __user * __cl_addr = (addr); \
+	unsigned long __cl_size = (n); \
+\
+	if (__cl_size && access_ok(VERIFY_WRITE, \
+		((unsigned long)(__cl_addr)), __cl_size)) \
+		__cl_size = __clear_user(__cl_addr, __cl_size); \
+\
+	__cl_size; \
+})
+
+/**
+ * strncpy_from_user: - Copy a NUL terminated string from userspace.
+ * @dst:   Destination address, in kernel space.  This buffer must be at
+ *         least @count bytes long.
+ * @src:   Source address, in user space.
+ * @count: Maximum number of bytes to copy, including the trailing NUL.
+ *
+ * Copies a NUL-terminated string from userspace to kernel space.
+ *
+ * On success, returns the length of the string (not including the trailing
+ * NUL).
+ *
+ * If access to userspace fails, returns -EFAULT (some data may have been
+ * copied).
+ *
+ * If @count is smaller than the length of the string, copies @count bytes
+ * and returns @count.
+ */
+#define strncpy_from_user(dest,src,count) \
+({ \
+	unsigned long __sfu_src = (unsigned long)(src); \
+	int __sfu_count = (int)(count); \
+	long __sfu_res = -EFAULT; \
+\
+	if (__access_ok(__sfu_src, __sfu_count)) \
+		__sfu_res = __strncpy_from_user((unsigned long)(dest), \
+					__sfu_src, __sfu_count); \
+\
+	__sfu_res; \
+})
+
 static inline unsigned long
 copy_from_user(void *to, const void __user *from, unsigned long n)
 {
@@ -31,4 +190,67 @@ copy_to_user(void __user *to, const void *from, unsigned long n)
 	return __copy_size;
 }
 
+/**
+ * strnlen_user: - Get the size of a string in user space.
+ * @s: The string to measure.
+ * @n: The maximum valid length
+ *
+ * Context: User context only.  This function may sleep.
+ *
+ * Get the size of a NUL-terminated string in user space.
+ *
+ * Returns the size of the string INCLUDING the terminating NUL.
+ * On exception, returns 0.
+ * If the string is too long, returns a value greater than @n.
+ */
+static inline long strnlen_user(const char __user *s, long n)
+{
+	if (!__addr_ok(s))
+		return 0;
+	else
+		return __strnlen_user(s, n);
+}
+
+/**
+ * strlen_user: - Get the size of a string in user space.
+ * @str: The string to measure.
+ *
+ * Context: User context only.  This function may sleep.
+ *
+ * Get the size of a NUL-terminated string in user space.
+ *
+ * Returns the size of the string INCLUDING the terminating NUL.
+ * On exception, returns 0.
+ *
+ * If there is a limit on the length of a valid string, you may wish to
+ * consider using strnlen_user() instead.
+ */
+#define strlen_user(str)	strnlen_user(str, ~0UL >> 1)
+
+/*
+ * The exception table consists of pairs of addresses: the first is the
+ * address of an instruction that is allowed to fault, and the second is
+ * the address at which the program should continue.  No registers are
+ * modified, so it is entirely up to the continuation code to figure out
+ * what to do.
+ *
+ * All the routines below use bits of fixup code that are out of line
+ * with the main instruction path.  This means when everything is well,
+ * we don't even have to jump over them.  Further, they do not intrude
+ * on our cache or tlb entries.
+ */
+struct exception_table_entry {
+	unsigned long insn, fixup;
+};
+
+#if defined(CONFIG_SUPERH64) && defined(CONFIG_MMU)
+#define ARCH_HAS_SEARCH_EXTABLE
+#endif
+
+int fixup_exception(struct pt_regs *regs);
+/* Returns 0 if exception not found and fixup.unit otherwise.  */
+unsigned long search_exception_table(unsigned long addr);
+const struct exception_table_entry *search_exception_tables(unsigned long addr);
+
+
 #endif /* __ASM_SH_UACCESS_H */
diff --git a/include/asm-sh/uaccess_32.h b/include/asm-sh/uaccess_32.h
index 44abd1682329..ae0d24f6653f 100644
--- a/include/asm-sh/uaccess_32.h
+++ b/include/asm-sh/uaccess_32.h
@@ -12,56 +12,6 @@
 #ifndef __ASM_SH_UACCESS_32_H
 #define __ASM_SH_UACCESS_32_H
 
-#include <linux/errno.h>
-#include <linux/sched.h>
-#include <asm/segment.h>
-
-#define VERIFY_READ 0
-#define VERIFY_WRITE 1
-
-#define __addr_ok(addr) \
-	((unsigned long __force)(addr) < current_thread_info()->addr_limit.seg)
-
-/*
- * __access_ok: Check if address with size is OK or not.
- *
- * Uhhuh, this needs 33-bit arithmetic. We have a carry..
- *
- * sum := addr + size;  carry? --> flag = true;
- * if (sum >= addr_limit) flag = true;
- */
-#define __access_ok(addr, size) \
-	(__addr_ok((addr) + (size)))
-#define access_ok(type, addr, size) \
-	(__chk_user_ptr(addr), \
-	 __access_ok((unsigned long __force)(addr), (size)))
-
-/*
- * Uh, these should become the main single-value transfer routines ...
- * They automatically use the right size if we just have the right
- * pointer type ...
- *
- * As SuperH uses the same address space for kernel and user data, we
- * can just do these as direct assignments.
- *
- * Careful to not
- * (a) re-use the arguments for side effects (sizeof is ok)
- * (b) require any knowledge of processes at this stage
- */
-#define put_user(x,ptr)		__put_user_check((x), (ptr), sizeof(*(ptr)))
-#define get_user(x,ptr)		__get_user_check((x), (ptr), sizeof(*(ptr)))
-
-/*
- * The "__xxx" versions do not do address space checking, useful when
- * doing multiple accesses to the same area (the user has to do the
- * checks by hand with "access_ok()")
- */
-#define __put_user(x,ptr)	__put_user_nocheck((x), (ptr), sizeof(*(ptr)))
-#define __get_user(x,ptr)	__get_user_nocheck((x), (ptr), sizeof(*(ptr)))
-
-struct __large_struct { unsigned long buf[100]; };
-#define __m(x) (*(struct __large_struct __user *)(x))
-
 #define __get_user_size(x,ptr,size,retval) \
 do { \
 	retval = 0; \
@@ -81,28 +31,7 @@ do { \
 	} \
 } while (0)
 
-#define __get_user_nocheck(x,ptr,size) \
-({ \
-	long __gu_err; \
-	unsigned long __gu_val; \
-	const __typeof__(*(ptr)) __user *__gu_addr = (ptr); \
-	__chk_user_ptr(ptr); \
-	__get_user_size(__gu_val, __gu_addr, (size), __gu_err); \
-	(x) = (__typeof__(*(ptr)))__gu_val; \
-	__gu_err; \
-})
-
-#define __get_user_check(x,ptr,size) \
-({ \
-	long __gu_err = -EFAULT; \
-	unsigned long __gu_val = 0; \
-	const __typeof__(*(ptr)) *__gu_addr = (ptr); \
-	if (likely(access_ok(VERIFY_READ, __gu_addr, (size)))) \
-		__get_user_size(__gu_val, __gu_addr, (size), __gu_err); \
-	(x) = (__typeof__(*(ptr)))__gu_val; \
-	__gu_err; \
-})
-
+#ifdef CONFIG_MMU
 #define __get_user_asm(x, addr, err, insn) \
 ({ \
 __asm__ __volatile__( \
@@ -123,6 +52,16 @@ __asm__ __volatile__( \
123 ".previous" \ 52 ".previous" \
124 :"=&r" (err), "=&r" (x) \ 53 :"=&r" (err), "=&r" (x) \
125 :"m" (__m(addr)), "i" (-EFAULT), "0" (err)); }) 54 :"m" (__m(addr)), "i" (-EFAULT), "0" (err)); })
55#else
56#define __get_user_asm(x, addr, err, insn) \
57do { \
58 __asm__ __volatile__ ( \
59 "mov." insn " %1, %0\n\t" \
60 : "=&r" (x) \
61 : "m" (__m(addr)) \
62 ); \
63} while (0)
64#endif /* CONFIG_MMU */
126 65
127extern void __get_user_unknown(void); 66extern void __get_user_unknown(void);
128 67
@@ -147,45 +86,41 @@ do { \
 	} \
 } while (0)
 
-#define __put_user_nocheck(x,ptr,size) \
-({ \
-	long __pu_err; \
-	__typeof__(*(ptr)) __user *__pu_addr = (ptr); \
-	__chk_user_ptr(ptr); \
-	__put_user_size((x), __pu_addr, (size), __pu_err); \
-	__pu_err; \
-})
-
-#define __put_user_check(x,ptr,size) \
-({ \
-	long __pu_err = -EFAULT; \
-	__typeof__(*(ptr)) __user *__pu_addr = (ptr); \
-	if (likely(access_ok(VERIFY_WRITE, __pu_addr, size))) \
-		__put_user_size((x), __pu_addr, (size), \
-			__pu_err); \
-	__pu_err; \
-})
-
-#define __put_user_asm(x, addr, err, insn) \
-({ \
-__asm__ __volatile__( \
-	"1:\n\t" \
-	"mov." insn " %1, %2\n\t" \
-	"2:\n" \
-	".section .fixup,\"ax\"\n" \
-	"3:\n\t" \
-	"mov.l 4f, %0\n\t" \
-	"jmp @%0\n\t" \
-	" mov %3, %0\n\t" \
-	".balign 4\n" \
-	"4: .long 2b\n\t" \
-	".previous\n" \
-	".section __ex_table,\"a\"\n\t" \
-	".long 1b, 3b\n\t" \
-	".previous" \
-	:"=&r" (err) \
-	:"r" (x), "m" (__m(addr)), "i" (-EFAULT), "0" (err) \
-	:"memory"); })
+#ifdef CONFIG_MMU
+#define __put_user_asm(x, addr, err, insn) \
+do { \
+	__asm__ __volatile__ ( \
+		"1:\n\t" \
+		"mov." insn " %1, %2\n\t" \
+		"2:\n" \
+		".section .fixup,\"ax\"\n" \
+		"3:\n\t" \
+		"mov.l 4f, %0\n\t" \
+		"jmp @%0\n\t" \
+		" mov %3, %0\n\t" \
+		".balign 4\n" \
+		"4: .long 2b\n\t" \
+		".previous\n" \
+		".section __ex_table,\"a\"\n\t" \
+		".long 1b, 3b\n\t" \
+		".previous" \
+		: "=&r" (err) \
+		: "r" (x), "m" (__m(addr)), "i" (-EFAULT), \
+		  "0" (err) \
+		: "memory" \
+	); \
+} while (0)
+#else
+#define __put_user_asm(x, addr, err, insn) \
+do { \
+	__asm__ __volatile__ ( \
+		"mov." insn " %0, %1\n\t" \
+		: /* no outputs */ \
+		: "r" (x), "m" (__m(addr)) \
+		: "memory" \
+	); \
+} while (0)
+#endif /* CONFIG_MMU */
 
 #if defined(CONFIG_CPU_LITTLE_ENDIAN)
 #define __put_user_u64(val,addr,retval) \
@@ -235,40 +170,7 @@ __asm__ __volatile__( \
 
 extern void __put_user_unknown(void);
 
-/* Generic arbitrary sized copy.  */
-/* Return the number of bytes NOT copied */
-__kernel_size_t __copy_user(void *to, const void *from, __kernel_size_t n);
-
-
-static __always_inline unsigned long
-__copy_from_user(void *to, const void __user *from, unsigned long n)
-{
-	return __copy_user(to, (__force void *)from, n);
-}
-
-static __always_inline unsigned long __must_check
-__copy_to_user(void __user *to, const void *from, unsigned long n)
-{
-	return __copy_user((__force void *)to, from, n);
-}
-
-#define __copy_to_user_inatomic __copy_to_user
-#define __copy_from_user_inatomic __copy_from_user
-
-/*
- * Clear the area and return remaining number of bytes
- * (on failure. Usually it's 0.)
- */
-extern __kernel_size_t __clear_user(void *addr, __kernel_size_t size);
-
-#define clear_user(addr,n) ({ \
-void * __cl_addr = (addr); \
-unsigned long __cl_size = (n); \
-if (__cl_size && __access_ok(((unsigned long)(__cl_addr)), __cl_size)) \
-__cl_size = __clear_user(__cl_addr, __cl_size); \
-__cl_size; })
-
-static __inline__ int
+static inline int
 __strncpy_from_user(unsigned long __dest, unsigned long __user __src, int __count)
 {
 	__kernel_size_t res;
@@ -307,37 +209,11 @@ __strncpy_from_user(unsigned long __dest, unsigned long __user __src, int __coun
 	return res;
 }
 
-/**
- * strncpy_from_user: - Copy a NUL terminated string from userspace.
- * @dst:   Destination address, in kernel space.  This buffer must be at
- *         least @count bytes long.
- * @src:   Source address, in user space.
- * @count: Maximum number of bytes to copy, including the trailing NUL.
- *
- * Copies a NUL-terminated string from userspace to kernel space.
- *
- * On success, returns the length of the string (not including the trailing
- * NUL).
- *
- * If access to userspace fails, returns -EFAULT (some data may have been
- * copied).
- *
- * If @count is smaller than the length of the string, copies @count bytes
- * and returns @count.
- */
-#define strncpy_from_user(dest,src,count) ({ \
-unsigned long __sfu_src = (unsigned long) (src); \
-int __sfu_count = (int) (count); \
-long __sfu_res = -EFAULT; \
-if(__access_ok(__sfu_src, __sfu_count)) { \
-__sfu_res = __strncpy_from_user((unsigned long) (dest), __sfu_src, __sfu_count); \
-} __sfu_res; })
-
 /*
  * Return the size of a string (including the ending 0 even when we have
  * exceeded the maximum string length).
  */
-static __inline__ long __strnlen_user(const char __user *__s, long __n)
+static inline long __strnlen_user(const char __user *__s, long __n)
 {
 	unsigned long res;
 	unsigned long __dummy;
@@ -369,61 +245,4 @@ static __inline__ long __strnlen_user(const char __user *__s, long __n)
 	return res;
 }
 
-/**
- * strnlen_user: - Get the size of a string in user space.
- * @s: The string to measure.
- * @n: The maximum valid length
- *
- * Context: User context only.  This function may sleep.
- *
- * Get the size of a NUL-terminated string in user space.
- *
- * Returns the size of the string INCLUDING the terminating NUL.
- * On exception, returns 0.
- * If the string is too long, returns a value greater than @n.
- */
-static __inline__ long strnlen_user(const char __user *s, long n)
-{
-	if (!__addr_ok(s))
-		return 0;
-	else
-		return __strnlen_user(s, n);
-}
-
-/**
- * strlen_user: - Get the size of a string in user space.
- * @str: The string to measure.
- *
- * Context: User context only.  This function may sleep.
- *
- * Get the size of a NUL-terminated string in user space.
- *
- * Returns the size of the string INCLUDING the terminating NUL.
- * On exception, returns 0.
- *
- * If there is a limit on the length of a valid string, you may wish to
- * consider using strnlen_user() instead.
- */
-#define strlen_user(str)	strnlen_user(str, ~0UL >> 1)
-
-/*
- * The exception table consists of pairs of addresses: the first is the
- * address of an instruction that is allowed to fault, and the second is
- * the address at which the program should continue.  No registers are
- * modified, so it is entirely up to the continuation code to figure out
- * what to do.
- *
- * All the routines below use bits of fixup code that are out of line
- * with the main instruction path.  This means when everything is well,
- * we don't even have to jump over them.  Further, they do not intrude
- * on our cache or tlb entries.
- */
-
-struct exception_table_entry
-{
-	unsigned long insn, fixup;
-};
-
-extern int fixup_exception(struct pt_regs *regs);
-
 #endif /* __ASM_SH_UACCESS_32_H */
diff --git a/include/asm-sh/uaccess_64.h b/include/asm-sh/uaccess_64.h
index 5833754dc747..81b3d515fcb3 100644
--- a/include/asm-sh/uaccess_64.h
+++ b/include/asm-sh/uaccess_64.h
@@ -20,68 +20,6 @@
  * License. See the file "COPYING" in the main directory of this archive
  * for more details.
  */
-#include <linux/errno.h>
-#include <linux/sched.h>
-
-#define VERIFY_READ 0
-#define VERIFY_WRITE 1
-
-#define __addr_ok(addr) ((unsigned long)(addr) < (current_thread_info()->addr_limit.seg))
-
-/*
- * Uhhuh, this needs 33-bit arithmetic. We have a carry..
- *
- * sum := addr + size;  carry? --> flag = true;
- * if (sum >= addr_limit) flag = true;
- */
-#define __range_ok(addr,size) (((unsigned long) (addr) + (size) < (current_thread_info()->addr_limit.seg)) ? 0 : 1)
-
-#define access_ok(type,addr,size) (__range_ok(addr,size) == 0)
-#define __access_ok(addr,size) (__range_ok(addr,size) == 0)
-
-/*
- * Uh, these should become the main single-value transfer routines ...
- * They automatically use the right size if we just have the right
- * pointer type ...
- *
- * As MIPS uses the same address space for kernel and user data, we
- * can just do these as direct assignments.
- *
- * Careful to not
- * (a) re-use the arguments for side effects (sizeof is ok)
- * (b) require any knowledge of processes at this stage
- */
-#define put_user(x,ptr)	__put_user_check((x),(ptr),sizeof(*(ptr)))
-#define get_user(x,ptr)	__get_user_check((x),(ptr),sizeof(*(ptr)))
-
-/*
- * The "__xxx" versions do not do address space checking, useful when
- * doing multiple accesses to the same area (the user has to do the
- * checks by hand with "access_ok()")
- */
-#define __put_user(x,ptr)	__put_user_nocheck((x),(ptr),sizeof(*(ptr)))
-#define __get_user(x,ptr)	__get_user_nocheck((x),(ptr),sizeof(*(ptr)))
-
-/*
- * The "xxx_ret" versions return constant specified in third argument, if
- * something bad happens. These macros can be optimized for the
- * case of just returning from the function xxx_ret is used.
- */
-
-#define put_user_ret(x,ptr,ret) ({ \
-if (put_user(x,ptr)) return ret; })
-
-#define get_user_ret(x,ptr,ret) ({ \
-if (get_user(x,ptr)) return ret; })
-
-#define __put_user_ret(x,ptr,ret) ({ \
-if (__put_user(x,ptr)) return ret; })
-
-#define __get_user_ret(x,ptr,ret) ({ \
-if (__get_user(x,ptr)) return ret; })
-
-struct __large_struct { unsigned long buf[100]; };
-#define __m(x) (*(struct __large_struct *)(x))
 
 #define __get_user_size(x,ptr,size,retval) \
 do { \
@@ -105,26 +43,6 @@ do { \
 	} \
 } while (0)
 
-#define __get_user_nocheck(x,ptr,size) \
-({ \
-	long __gu_err, __gu_val; \
-	__get_user_size((void *)&__gu_val, (long)(ptr), \
-			(size), __gu_err); \
-	(x) = (__typeof__(*(ptr)))__gu_val; \
-	__gu_err; \
-})
-
-#define __get_user_check(x,ptr,size) \
-({ \
-	long __gu_addr = (long)(ptr); \
-	long __gu_err = -EFAULT, __gu_val; \
-	if (__access_ok(__gu_addr, (size))) \
-		__get_user_size((void *)&__gu_val, __gu_addr, \
-				(size), __gu_err); \
-	(x) = (__typeof__(*(ptr))) __gu_val; \
-	__gu_err; \
-})
-
 extern long __get_user_asm_b(void *, long);
 extern long __get_user_asm_w(void *, long);
 extern long __get_user_asm_l(void *, long);
@@ -152,115 +70,10 @@ do { \
 	} \
 } while (0)
 
-#define __put_user_nocheck(x,ptr,size) \
-({ \
-	long __pu_err; \
-	__typeof__(*(ptr)) __pu_val = (x); \
-	__put_user_size((void *)&__pu_val, (long)(ptr), (size), __pu_err); \
-	__pu_err; \
-})
-
-#define __put_user_check(x,ptr,size) \
-({ \
-	long __pu_err = -EFAULT; \
-	long __pu_addr = (long)(ptr); \
-	__typeof__(*(ptr)) __pu_val = (x); \
-\
-	if (__access_ok(__pu_addr, (size))) \
-		__put_user_size((void *)&__pu_val, __pu_addr, (size), __pu_err);\
-	__pu_err; \
-})
-
 extern long __put_user_asm_b(void *, long);
 extern long __put_user_asm_w(void *, long);
 extern long __put_user_asm_l(void *, long);
 extern long __put_user_asm_q(void *, long);
 extern void __put_user_unknown(void);
 
-
-/* Generic arbitrary sized copy.  */
-/* Return the number of bytes NOT copied */
-/* XXX: should be such that: 4byte and the rest. */
-extern __kernel_size_t __copy_user(void *__to, const void *__from, __kernel_size_t __n);
-
-#define copy_to_user_ret(to,from,n,retval) ({ \
-if (copy_to_user(to,from,n)) \
-	return retval; \
-})
-
-#define __copy_to_user(to,from,n) \
-	__copy_user((void *)(to), \
-		    (void *)(from), n)
-
-#define __copy_to_user_ret(to,from,n,retval) ({ \
-if (__copy_to_user(to,from,n)) \
-	return retval; \
-})
-
-#define copy_from_user_ret(to,from,n,retval) ({ \
-if (copy_from_user(to,from,n)) \
-	return retval; \
-})
-
-#define __copy_from_user(to,from,n) \
-	__copy_user((void *)(to), \
-		    (void *)(from), n)
-
-#define __copy_from_user_ret(to,from,n,retval) ({ \
-if (__copy_from_user(to,from,n)) \
-	return retval; \
-})
-
-#define __copy_to_user_inatomic __copy_to_user
-#define __copy_from_user_inatomic __copy_from_user
-
-/* XXX: Not sure it works well..
-   should be such that: 4byte clear and the rest. */
-extern __kernel_size_t __clear_user(void *addr, __kernel_size_t size);
-
-#define clear_user(addr,n) ({ \
-void * __cl_addr = (addr); \
-unsigned long __cl_size = (n); \
-if (__cl_size && __access_ok(((unsigned long)(__cl_addr)), __cl_size)) \
-__cl_size = __clear_user(__cl_addr, __cl_size); \
-__cl_size; })
-
-extern int __strncpy_from_user(unsigned long __dest, unsigned long __src, int __count);
-
-#define strncpy_from_user(dest,src,count) ({ \
-unsigned long __sfu_src = (unsigned long) (src); \
-int __sfu_count = (int) (count); \
-long __sfu_res = -EFAULT; \
-if(__access_ok(__sfu_src, __sfu_count)) { \
-__sfu_res = __strncpy_from_user((unsigned long) (dest), __sfu_src, __sfu_count); \
-} __sfu_res; })
-
-#define strlen_user(str) strnlen_user(str, ~0UL >> 1)
-
-/*
- * Return the size of a string (including the ending 0!)
- */
-extern long __strnlen_user(const char *__s, long __n);
-
-static inline long strnlen_user(const char *s, long n)
-{
-	if (!__addr_ok(s))
-		return 0;
-	else
-		return __strnlen_user(s, n);
-}
-
-struct exception_table_entry
-{
-	unsigned long insn, fixup;
-};
-
-#ifdef CONFIG_MMU
-#define ARCH_HAS_SEARCH_EXTABLE
-#endif
-
-/* Returns 0 if exception not found and fixup.unit otherwise.  */
-extern unsigned long search_exception_table(unsigned long addr);
-extern const struct exception_table_entry *search_exception_tables (unsigned long addr);
-
 #endif /* __ASM_SH_UACCESS_64_H */
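
A note on the consolidated __access_ok()/__addr_ok() check above: addr + size can wrap
past the top of a 32-bit address space, which is why the comment in the patch talks about
33-bit arithmetic and a carry. The sketch below is an illustrative user-space model of
that range check, not kernel code; the demo_* names and the DEMO_ADDR_LIMIT value are
assumptions chosen only for demonstration (the sum is done in 64 bits so the carry cannot
be lost).

/*
 * Illustrative user-space model of the range check described in the
 * __access_ok() comment; not the kernel macro itself.
 */
#include <stdint.h>
#include <stdio.h>

#define DEMO_ADDR_LIMIT 0x80000000UL	/* assumed user address limit */

/* Valid iff there is no carry out of 32 bits and the sum stays below the limit,
 * matching the comment's "sum >= addr_limit --> bad" rule. */
static int demo_access_ok(uint32_t addr, uint32_t size)
{
	uint64_t sum = (uint64_t)addr + size;	/* 33-bit-safe sum */

	return sum < DEMO_ADDR_LIMIT;
}

int main(void)
{
	printf("%d\n", demo_access_ok(0x10000000, 0x100));	/* 1: in range */
	printf("%d\n", demo_access_ok(0x7fffff00, 0x200));	/* 0: crosses the limit */
	printf("%d\n", demo_access_ok(0xfffffff0, 0x20));	/* 0: 32-bit sum would wrap */
	return 0;
}

Built with any C99 compiler (e.g. cc demo.c), the three probes print 1, 0, 0.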