author    Arnd Bergmann <arnd@arndb.de>          2009-05-13 18:56:37 -0400
committer Arnd Bergmann <arnd@klappe2.(none)>    2009-06-11 15:02:50 -0400
commit    eed417ddd52146f446595b5a7d8f21b1814b95b7 (patch)
tree      272b71df46768a0d37bfb36e0cd701f01b109035 /include/asm-generic
parent    5c01b46bb6bb8f2662573c05c87b5d68fa25af89 (diff)
asm-generic: add a generic uaccess.h
Based on discussions with Michal Simek and code from m68knommu and h8300,
this version of uaccess.h should be usable by most architectures, by
overriding some parts of it. Simple NOMMU architectures can use it out of
the box, but a minimal __access_ok() should be added there as well.

Cc: Michal Simek <monstr@monstr.eu>
Signed-off-by: Arnd Bergmann <arnd@arndb.de>
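Editor's note, not part of the commit: the override hook is the "#ifndef __access_ok" guard in the header below, so a NOMMU port that wants a real range check can supply its own __access_ok() in its asm/uaccess.h before including the generic file. A minimal sketch, assuming hypothetical per-port RAM bounds memory_start and memory_end (names not defined by this patch); the check is written so that addr + size cannot wrap around:

/* Sketch only; memory_start/memory_end stand in for real per-port symbols. */
static inline int __access_ok(unsigned long addr, unsigned long size)
{
        return addr >= memory_start && addr <= memory_end &&
               size <= memory_end - addr;
}
#define __access_ok __access_ok	/* tell the generic header not to define its own */

#include <asm-generic/uaccess.h>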
Diffstat (limited to 'include/asm-generic')
-rw-r--r--    include/asm-generic/uaccess.h    325
1 file changed, 325 insertions(+), 0 deletions(-)
diff --git a/include/asm-generic/uaccess.h b/include/asm-generic/uaccess.h
new file mode 100644
index 000000000000..6d8cab22e294
--- /dev/null
+++ b/include/asm-generic/uaccess.h
@@ -0,0 +1,325 @@
#ifndef __ASM_GENERIC_UACCESS_H
#define __ASM_GENERIC_UACCESS_H

/*
 * User space memory access functions, these should work
 * on any machine that has kernel and user data in the same
 * address space, e.g. all NOMMU machines.
 */
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/string.h>

#include <asm/segment.h>

#define MAKE_MM_SEG(s) ((mm_segment_t) { (s) })

#ifndef KERNEL_DS
#define KERNEL_DS MAKE_MM_SEG(~0UL)
#endif

#ifndef USER_DS
#define USER_DS MAKE_MM_SEG(TASK_SIZE - 1)
#endif

#ifndef get_fs
#define get_ds() (KERNEL_DS)
#define get_fs() (current_thread_info()->addr_limit)

static inline void set_fs(mm_segment_t fs)
{
        current_thread_info()->addr_limit = fs;
}
#endif

#define segment_eq(a, b) ((a).seg == (b).seg)

#define VERIFY_READ 0
#define VERIFY_WRITE 1

#define access_ok(type, addr, size) __access_ok((unsigned long)(addr),(size))

/*
 * The architecture should really override this if possible, at least
 * doing a check on the get_fs()
 */
#ifndef __access_ok
static inline int __access_ok(unsigned long addr, unsigned long size)
{
        return 1;
}
#endif

/*
 * The exception table consists of pairs of addresses: the first is the
 * address of an instruction that is allowed to fault, and the second is
 * the address at which the program should continue. No registers are
 * modified, so it is entirely up to the continuation code to figure out
 * what to do.
 *
 * All the routines below use bits of fixup code that are out of line
 * with the main instruction path. This means when everything is well,
 * we don't even have to jump over them. Further, they do not intrude
 * on our cache or tlb entries.
 */

struct exception_table_entry
{
        unsigned long insn, fixup;
};

/* Returns 0 if exception not found and fixup otherwise. */
extern unsigned long search_exception_table(unsigned long);

/*
 * architectures with an MMU should override these two
 */
#ifndef __copy_from_user
static inline __must_check long __copy_from_user(void *to,
                const void __user * from, unsigned long n)
{
        if (__builtin_constant_p(n)) {
                switch(n) {
                case 1:
                        *(u8 *)to = *(u8 __force *)from;
                        return 0;
                case 2:
                        *(u16 *)to = *(u16 __force *)from;
                        return 0;
                case 4:
                        *(u32 *)to = *(u32 __force *)from;
                        return 0;
#ifdef CONFIG_64BIT
                case 8:
                        *(u64 *)to = *(u64 __force *)from;
                        return 0;
#endif
                default:
                        break;
                }
        }

        memcpy(to, (const void __force *)from, n);
        return 0;
}
#endif

#ifndef __copy_to_user
static inline __must_check long __copy_to_user(void __user *to,
                const void *from, unsigned long n)
{
        if (__builtin_constant_p(n)) {
                switch(n) {
                case 1:
                        *(u8 __force *)to = *(u8 *)from;
                        return 0;
                case 2:
                        *(u16 __force *)to = *(u16 *)from;
                        return 0;
                case 4:
                        *(u32 __force *)to = *(u32 *)from;
                        return 0;
#ifdef CONFIG_64BIT
                case 8:
                        *(u64 __force *)to = *(u64 *)from;
                        return 0;
#endif
                default:
                        break;
                }
        }

        memcpy((void __force *)to, from, n);
        return 0;
}
#endif

/*
 * These are the main single-value transfer routines. They automatically
 * use the right size if we just have the right pointer type.
 * This version just falls back to copy_{from,to}_user, which should
 * provide a fast-path for small values.
 */
#define __put_user(x, ptr)                                \
({                                                        \
        __typeof__(*(ptr)) __x = (x);                     \
        int __pu_err = -EFAULT;                           \
        __chk_user_ptr(ptr);                              \
        switch (sizeof (*(ptr))) {                        \
        case 1:                                           \
        case 2:                                           \
        case 4:                                           \
        case 8:                                           \
                __pu_err = __put_user_fn(sizeof (*(ptr)), \
                                         ptr, &__x);      \
                break;                                    \
        default:                                          \
                __put_user_bad();                         \
                break;                                    \
        }                                                 \
        __pu_err;                                         \
})

#define put_user(x, ptr)                                  \
({                                                        \
        might_sleep();                                    \
        __access_ok(ptr, sizeof (*ptr)) ?                 \
                __put_user(x, ptr) :                      \
                -EFAULT;                                  \
})

static inline int __put_user_fn(size_t size, void __user *ptr, void *x)
{
        size = __copy_to_user(ptr, x, size);
        return size ? -EFAULT : size;
}

extern int __put_user_bad(void) __attribute__((noreturn));

#define __get_user(x, ptr)                                  \
({                                                          \
        int __gu_err = -EFAULT;                             \
        __chk_user_ptr(ptr);                                \
        switch (sizeof(*(ptr))) {                           \
        case 1: {                                           \
                unsigned char __x;                          \
                __gu_err = __get_user_fn(sizeof (*(ptr)),   \
                                         ptr, &__x);        \
                (x) = *(__force __typeof__(*(ptr)) *) &__x; \
                break;                                      \
        };                                                  \
        case 2: {                                           \
                unsigned short __x;                         \
                __gu_err = __get_user_fn(sizeof (*(ptr)),   \
                                         ptr, &__x);        \
                (x) = *(__force __typeof__(*(ptr)) *) &__x; \
                break;                                      \
        };                                                  \
        case 4: {                                           \
                unsigned int __x;                           \
                __gu_err = __get_user_fn(sizeof (*(ptr)),   \
                                         ptr, &__x);        \
                (x) = *(__force __typeof__(*(ptr)) *) &__x; \
                break;                                      \
        };                                                  \
        case 8: {                                           \
                unsigned long long __x;                     \
                __gu_err = __get_user_fn(sizeof (*(ptr)),   \
                                         ptr, &__x);        \
                (x) = *(__force __typeof__(*(ptr)) *) &__x; \
                break;                                      \
        };                                                  \
        default:                                            \
                __get_user_bad();                           \
                break;                                      \
        }                                                   \
        __gu_err;                                           \
})

#define get_user(x, ptr)                                    \
({                                                          \
        might_sleep();                                      \
        __access_ok(ptr, sizeof (*ptr)) ?                   \
                __get_user(x, ptr) :                        \
                -EFAULT;                                    \
})

static inline int __get_user_fn(size_t size, const void __user *ptr, void *x)
{
        size = __copy_from_user(x, ptr, size);
        return size ? -EFAULT : size;
}

extern int __get_user_bad(void) __attribute__((noreturn));

#ifndef __copy_from_user_inatomic
#define __copy_from_user_inatomic __copy_from_user
#endif

#ifndef __copy_to_user_inatomic
#define __copy_to_user_inatomic __copy_to_user
#endif

static inline long copy_from_user(void *to,
                const void __user * from, unsigned long n)
{
        might_sleep();
        if (__access_ok(from, n))
                return __copy_from_user(to, from, n);
        else
                return n;
}

static inline long copy_to_user(void __user *to,
                const void *from, unsigned long n)
{
        might_sleep();
        if (__access_ok(to, n))
                return __copy_to_user(to, from, n);
        else
                return n;
}

/*
 * Copy a null terminated string from userspace.
 */
#ifndef __strncpy_from_user
static inline long
__strncpy_from_user(char *dst, const char __user *src, long count)
{
        char *tmp;
        strncpy(dst, (const char __force *)src, count);
        for (tmp = dst; *tmp && count > 0; tmp++, count--)
                ;
        return (tmp - dst);
}
#endif

static inline long
strncpy_from_user(char *dst, const char __user *src, long count)
{
        if (!__access_ok(src, 1))
                return -EFAULT;
        return __strncpy_from_user(dst, src, count);
}

/*
 * Return the size of a string (including the ending 0)
 *
 * Return 0 on exception, a value greater than N if too long
 */
#ifndef strnlen_user
static inline long strnlen_user(const char __user *src, long n)
{
        return strlen((void * __force)src) + 1;
}
#endif

static inline long strlen_user(const char __user *src)
{
        return strnlen_user(src, 32767);
}

/*
 * Zero Userspace
 */
#ifndef __clear_user
static inline __must_check unsigned long
__clear_user(void __user *to, unsigned long n)
{
        memset((void __force *)to, 0, n);
        return 0;
}
#endif

static inline __must_check unsigned long
clear_user(void __user *to, unsigned long n)
{
        might_sleep();
        if (!__access_ok(to, n))
                return n;

        return __clear_user(to, n);
}

#endif /* __ASM_GENERIC_UACCESS_H */
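Editor's note, not part of the commit: once a port pulls in this header, callers follow the usual uaccess pattern of checking the helpers' return values (the copy routines return the number of bytes not copied, the single-value helpers return 0 or -EFAULT). A hedged sketch; foo_args, foo_set_value() and foo_bump() are made-up names for illustration:

/* Hypothetical driver-side usage of the generic helpers. */
struct foo_args {
        int value;
};

static long foo_set_value(struct foo_args __user *argp)
{
        struct foo_args args;

        if (copy_from_user(&args, argp, sizeof(args)))  /* nonzero means bytes were left uncopied */
                return -EFAULT;

        args.value++;

        if (copy_to_user(argp, &args, sizeof(args)))
                return -EFAULT;

        return 0;
}

static long foo_bump(int __user *p)
{
        int v;

        if (get_user(v, p))             /* 0 on success, -EFAULT on a bad address */
                return -EFAULT;
        return put_user(v + 1, p);      /* single-value path via __put_user_fn() */
}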