author     Paul Mundt <lethal@linux-sh.org>    2007-11-10 05:55:50 -0500
committer  Paul Mundt <lethal@linux-sh.org>    2008-01-27 23:18:42 -0500
commit     9b01bd9ee6408846c0553c03fb4b864353a845c9 (patch)
tree       5627f9f9625228814b071db735378a106c1dfe72 /include/asm-sh/uaccess_32.h
parent     c0acca6789281650134cfbbe00fc461e39440446 (diff)
sh: Split out uaccess.h into _32 and _64 variants.
Signed-off-by: Paul Mundt <lethal@linux-sh.org>
Diffstat (limited to 'include/asm-sh/uaccess_32.h')
-rw-r--r--  include/asm-sh/uaccess_32.h  575
1 file changed, 575 insertions(+), 0 deletions(-)
diff --git a/include/asm-sh/uaccess_32.h b/include/asm-sh/uaccess_32.h
new file mode 100644
index 000000000000..f18a1a5c95c0
--- /dev/null
+++ b/include/asm-sh/uaccess_32.h
@@ -0,0 +1,575 @@
/* $Id: uaccess.h,v 1.11 2003/10/13 07:21:20 lethal Exp $
 *
 * User space memory access functions
 *
 * Copyright (C) 1999, 2002 Niibe Yutaka
 * Copyright (C) 2003 Paul Mundt
 *
 * Based on:
 *     MIPS implementation version 1.15 by
 *         Copyright (C) 1996, 1997, 1998 by Ralf Baechle
 *     and i386 version.
 */
#ifndef __ASM_SH_UACCESS_H
#define __ASM_SH_UACCESS_H

#include <linux/errno.h>
#include <linux/sched.h>

#define VERIFY_READ  0
#define VERIFY_WRITE 1

/*
 * The fs value determines whether argument validity checking should be
 * performed or not.  If get_fs() == USER_DS, checking is performed;
 * with get_fs() == KERNEL_DS, checking is bypassed.
 *
 * For historical reasons (Data Segment Register?), these macros are misnamed.
 */

#define MAKE_MM_SEG(s)  ((mm_segment_t) { (s) })

#define KERNEL_DS       MAKE_MM_SEG(0xFFFFFFFFUL)
#define USER_DS         MAKE_MM_SEG(PAGE_OFFSET)

#define segment_eq(a,b) ((a).seg == (b).seg)

#define get_ds()        (KERNEL_DS)

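/*
 * Illustrative sketch (editor's addition, not part of this patch): the
 * classic pattern for temporarily lifting the limit so kernel buffers
 * can be handed to routines that expect user pointers:
 *
 *        mm_segment_t old_fs = get_fs();
 *
 *        set_fs(KERNEL_DS);
 *        ...call the user accessors on kernel addresses...
 *        set_fs(old_fs);
 */
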
#if !defined(CONFIG_MMU)
/* NOMMU is always true */
#define __addr_ok(addr) (1)

static inline mm_segment_t get_fs(void)
{
        return USER_DS;
}

static inline void set_fs(mm_segment_t s)
{
}

/*
 * __access_ok: Check if address with size is OK or not.
 *
 * If we don't have an MMU (or if it's disabled), the only thing we really
 * have to look out for is whether the address resides somewhere outside
 * of available RAM.
 *
 * TODO: This check could probably also stand to be restricted somewhat more,
 * though it still does the Right Thing(tm) for the time being.
 */
static inline int __access_ok(unsigned long addr, unsigned long size)
{
        return ((addr >= memory_start) && ((addr + size) < memory_end));
}
#else /* CONFIG_MMU */
#define __addr_ok(addr) \
        ((unsigned long)(addr) < (current_thread_info()->addr_limit.seg))

#define get_fs()        (current_thread_info()->addr_limit)
#define set_fs(x)       (current_thread_info()->addr_limit = (x))

/*
 * __access_ok: Check if address with size is OK or not.
 *
 * We do three checks:
 * (1) is it user space?
 * (2) addr + size --> carry?
 * (3) addr + size >= 0x80000000 (PAGE_OFFSET)
 *
 * (1) (2) (3) | RESULT
 *  0   0   0  |  ok
 *  0   0   1  |  ok
 *  0   1   0  |  bad
 *  0   1   1  |  bad
 *  1   0   0  |  ok
 *  1   0   1  |  bad
 *  1   1   0  |  bad
 *  1   1   1  |  bad
 */
static inline int __access_ok(unsigned long addr, unsigned long size)
{
        unsigned long flag, tmp;

        __asm__("stc r7_bank, %0\n\t"
                "mov.l @(8,%0), %0\n\t"
                "clrt\n\t"
                "addc %2, %1\n\t"
                "and %1, %0\n\t"
                "rotcl %0\n\t"
                "rotcl %0\n\t"
                "and #3, %0"
                : "=&z" (flag), "=r" (tmp)
                : "r" (addr), "1" (size)
                : "t");

        return flag == 0;
}
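
/*
 * Illustrative plain-C rendering of the assembly above (editor's
 * addition, not part of this patch); "sum" and "limit" are local names
 * for exposition:
 *
 *        unsigned long sum = addr + size;
 *        int carry = sum < addr;
 *        unsigned long limit = current_thread_info()->addr_limit.seg;
 *
 *        return !carry && !((limit & sum) & 0x80000000UL);
 *
 * i.e. the access is rejected on wrap-around, or when the end of the
 * range crosses above the limit's top bit (PAGE_OFFSET for USER_DS).
 */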
#endif /* CONFIG_MMU */

static inline int access_ok(int type, const void __user *p, unsigned long size)
{
        unsigned long addr = (unsigned long)p;
        return __access_ok(addr, size);
}

/*
 * Uh, these should become the main single-value transfer routines ...
 * They automatically use the right size if we just have the right
 * pointer type ...
 *
 * As SuperH uses the same address space for kernel and user data, we
 * can just do these as direct assignments.
 *
 * Careful to not
 * (a) re-use the arguments for side effects (sizeof is ok)
 * (b) require any knowledge of processes at this stage
 */
#define put_user(x,ptr)  __put_user_check((x), (ptr), sizeof(*(ptr)))
#define get_user(x,ptr)  __get_user_check((x), (ptr), sizeof(*(ptr)))

/*
 * The "__xxx" versions do not do address space checking, useful when
 * doing multiple accesses to the same area (the user has to do the
 * checks by hand with "access_ok()")
 */
#define __put_user(x,ptr) \
        __put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
#define __get_user(x,ptr) \
        __get_user_nocheck((x), (ptr), sizeof(*(ptr)))

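/*
 * Illustrative usage sketch (editor's addition, not part of this
 * patch); "uptr" stands for a pointer handed in from user space:
 *
 *        int val;
 *
 *        if (get_user(val, uptr))
 *                return -EFAULT;
 *        if (put_user(val + 1, uptr))
 *                return -EFAULT;
 *
 * The "__" variants skip the limit check, so they are only safe after
 * an explicit access_ok() on the same range.
 */
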
struct __large_struct { unsigned long buf[100]; };
#define __m(x) (*(struct __large_struct __user *)(x))

#define __get_user_size(x,ptr,size,retval) \
do { \
        retval = 0; \
        __chk_user_ptr(ptr); \
        switch (size) { \
        case 1: \
                __get_user_asm(x, ptr, retval, "b"); \
                break; \
        case 2: \
                __get_user_asm(x, ptr, retval, "w"); \
                break; \
        case 4: \
                __get_user_asm(x, ptr, retval, "l"); \
                break; \
        default: \
                __get_user_unknown(); \
                break; \
        } \
} while (0)

#define __get_user_nocheck(x,ptr,size) \
({ \
        long __gu_err, __gu_val; \
        __get_user_size(__gu_val, (ptr), (size), __gu_err); \
        (x) = (__typeof__(*(ptr)))__gu_val; \
        __gu_err; \
})

#ifdef CONFIG_MMU
#define __get_user_check(x,ptr,size) \
({ \
        long __gu_err, __gu_val; \
        __chk_user_ptr(ptr); \
        switch (size) { \
        case 1: \
                __get_user_1(__gu_val, (ptr), __gu_err); \
                break; \
        case 2: \
                __get_user_2(__gu_val, (ptr), __gu_err); \
                break; \
        case 4: \
                __get_user_4(__gu_val, (ptr), __gu_err); \
                break; \
        default: \
                __get_user_unknown(); \
                break; \
        } \
        \
        (x) = (__typeof__(*(ptr)))__gu_val; \
        __gu_err; \
})

/*
 * __get_user_{1,2,4} fold the address-limit check into the access
 * itself: the address is tested against addr_limit up front, and the
 * faulting load is backed by an __ex_table entry pointing at the
 * error path (#-14 below is -EFAULT).
 */
#define __get_user_1(x,addr,err) ({ \
__asm__("stc r7_bank, %1\n\t" \
        "mov.l @(8,%1), %1\n\t" \
        "and %2, %1\n\t" \
        "cmp/pz %1\n\t" \
        "bt/s 1f\n\t" \
        " mov #0, %0\n\t" \
        "0:\n" \
        "mov #-14, %0\n\t" \
        "bra 2f\n\t" \
        " mov #0, %1\n" \
        "1:\n\t" \
        "mov.b @%2, %1\n\t" \
        "extu.b %1, %1\n" \
        "2:\n" \
        ".section __ex_table,\"a\"\n\t" \
        ".long 1b, 0b\n\t" \
        ".previous" \
        : "=&r" (err), "=&r" (x) \
        : "r" (addr) \
        : "t"); \
})

#define __get_user_2(x,addr,err) ({ \
__asm__("stc r7_bank, %1\n\t" \
        "mov.l @(8,%1), %1\n\t" \
        "and %2, %1\n\t" \
        "cmp/pz %1\n\t" \
        "bt/s 1f\n\t" \
        " mov #0, %0\n\t" \
        "0:\n" \
        "mov #-14, %0\n\t" \
        "bra 2f\n\t" \
        " mov #0, %1\n" \
        "1:\n\t" \
        "mov.w @%2, %1\n\t" \
        "extu.w %1, %1\n" \
        "2:\n" \
        ".section __ex_table,\"a\"\n\t" \
        ".long 1b, 0b\n\t" \
        ".previous" \
        : "=&r" (err), "=&r" (x) \
        : "r" (addr) \
        : "t"); \
})

#define __get_user_4(x,addr,err) ({ \
__asm__("stc r7_bank, %1\n\t" \
        "mov.l @(8,%1), %1\n\t" \
        "and %2, %1\n\t" \
        "cmp/pz %1\n\t" \
        "bt/s 1f\n\t" \
        " mov #0, %0\n\t" \
        "0:\n" \
        "mov #-14, %0\n\t" \
        "bra 2f\n\t" \
        " mov #0, %1\n" \
        "1:\n\t" \
        "mov.l @%2, %1\n\t" \
        "2:\n" \
        ".section __ex_table,\"a\"\n\t" \
        ".long 1b, 0b\n\t" \
        ".previous" \
        : "=&r" (err), "=&r" (x) \
        : "r" (addr) \
        : "t"); \
})
#else /* CONFIG_MMU */
#define __get_user_check(x,ptr,size) \
({ \
        long __gu_err, __gu_val; \
        if (__access_ok((unsigned long)(ptr), (size))) { \
                __get_user_size(__gu_val, (ptr), (size), __gu_err); \
                (x) = (__typeof__(*(ptr)))__gu_val; \
        } else \
                __gu_err = -EFAULT; \
        __gu_err; \
})
#endif

#define __get_user_asm(x, addr, err, insn) \
({ \
__asm__ __volatile__( \
        "1:\n\t" \
        "mov." insn " %2, %1\n\t" \
        "mov #0, %0\n" \
        "2:\n" \
        ".section .fixup,\"ax\"\n" \
        "3:\n\t" \
        "mov #0, %1\n\t" \
        "mov.l 4f, %0\n\t" \
        "jmp @%0\n\t" \
        " mov %3, %0\n" \
        "4: .long 2b\n\t" \
        ".previous\n" \
        ".section __ex_table,\"a\"\n\t" \
        ".long 1b, 3b\n\t" \
        ".previous" \
        : "=&r" (err), "=&r" (x) \
        : "m" (__m(addr)), "i" (-EFAULT)); })

extern void __get_user_unknown(void);

#define __put_user_size(x,ptr,size,retval) \
do { \
        retval = 0; \
        __chk_user_ptr(ptr); \
        switch (size) { \
        case 1: \
                __put_user_asm(x, ptr, retval, "b"); \
                break; \
        case 2: \
                __put_user_asm(x, ptr, retval, "w"); \
                break; \
        case 4: \
                __put_user_asm(x, ptr, retval, "l"); \
                break; \
        case 8: \
                __put_user_u64(x, ptr, retval); \
                break; \
        default: \
                __put_user_unknown(); \
        } \
} while (0)

#define __put_user_nocheck(x,ptr,size) \
({ \
        long __pu_err; \
        __put_user_size((x), (ptr), (size), __pu_err); \
        __pu_err; \
})

#define __put_user_check(x,ptr,size) \
({ \
        long __pu_err = -EFAULT; \
        __typeof__(*(ptr)) __user *__pu_addr = (ptr); \
        \
        if (__access_ok((unsigned long)__pu_addr, size)) \
                __put_user_size((x), __pu_addr, (size), __pu_err); \
        __pu_err; \
})

#define __put_user_asm(x, addr, err, insn) \
({ \
__asm__ __volatile__( \
        "1:\n\t" \
        "mov." insn " %1, %2\n\t" \
        "mov #0, %0\n" \
        "2:\n" \
        ".section .fixup,\"ax\"\n" \
        "3:\n\t" \
        "nop\n\t" \
        "mov.l 4f, %0\n\t" \
        "jmp @%0\n\t" \
        "mov %3, %0\n" \
        "4: .long 2b\n\t" \
        ".previous\n" \
        ".section __ex_table,\"a\"\n\t" \
        ".long 1b, 3b\n\t" \
        ".previous" \
        : "=&r" (err) \
        : "r" (x), "m" (__m(addr)), "i" (-EFAULT) \
        : "memory"); })

#if defined(__LITTLE_ENDIAN__)
#define __put_user_u64(val,addr,retval) \
({ \
__asm__ __volatile__( \
        "1:\n\t" \
        "mov.l %R1,%2\n\t" \
        "mov.l %S1,%T2\n\t" \
        "mov #0,%0\n" \
        "2:\n" \
        ".section .fixup,\"ax\"\n" \
        "3:\n\t" \
        "nop\n\t" \
        "mov.l 4f,%0\n\t" \
        "jmp @%0\n\t" \
        " mov %3,%0\n" \
        "4: .long 2b\n\t" \
        ".previous\n" \
        ".section __ex_table,\"a\"\n\t" \
        ".long 1b, 3b\n\t" \
        ".previous" \
        : "=r" (retval) \
        : "r" (val), "m" (__m(addr)), "i" (-EFAULT) \
        : "memory"); })
#else
#define __put_user_u64(val,addr,retval) \
({ \
__asm__ __volatile__( \
        "1:\n\t" \
        "mov.l %S1,%2\n\t" \
        "mov.l %R1,%T2\n\t" \
        "mov #0,%0\n" \
        "2:\n" \
        ".section .fixup,\"ax\"\n" \
        "3:\n\t" \
        "nop\n\t" \
        "mov.l 4f,%0\n\t" \
        "jmp @%0\n\t" \
        " mov %3,%0\n" \
        "4: .long 2b\n\t" \
        ".previous\n" \
        ".section __ex_table,\"a\"\n\t" \
        ".long 1b, 3b\n\t" \
        ".previous" \
        : "=r" (retval) \
        : "r" (val), "m" (__m(addr)), "i" (-EFAULT) \
        : "memory"); })
#endif
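
/*
 * Clarifying note (editor's addition): %R1 and %S1 pick the least and
 * most significant 32-bit halves of the 64-bit "val" register pair,
 * and %T2 names the word following the "addr" memory operand.  In
 * plain C the two variants above behave roughly like:
 *
 *        u32 *p = (u32 *)addr;
 *
 *        p[0] = (u32)val;          (low word first, little-endian)
 *        p[1] = (u32)(val >> 32);
 *
 * with the two stores swapped on big-endian.
 */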

extern void __put_user_unknown(void);

/* Generic arbitrary sized copy. */
/* Return the number of bytes NOT copied */
__kernel_size_t __copy_user(void *to, const void *from, __kernel_size_t n);

#define copy_to_user(to,from,n) ({ \
        void *__copy_to = (void *)(to); \
        __kernel_size_t __copy_size = (__kernel_size_t)(n); \
        __kernel_size_t __copy_res; \
        if (__copy_size && __access_ok((unsigned long)__copy_to, __copy_size)) \
                __copy_res = __copy_user(__copy_to, (void *)(from), __copy_size); \
        else \
                __copy_res = __copy_size; \
        __copy_res; })

#define copy_from_user(to,from,n) ({ \
        void *__copy_to = (void *)(to); \
        void *__copy_from = (void *)(from); \
        __kernel_size_t __copy_size = (__kernel_size_t)(n); \
        __kernel_size_t __copy_res; \
        if (__copy_size && __access_ok((unsigned long)__copy_from, __copy_size)) \
                __copy_res = __copy_user(__copy_to, __copy_from, __copy_size); \
        else \
                __copy_res = __copy_size; \
        __copy_res; })
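
/*
 * Illustrative usage sketch (editor's addition, not part of this
 * patch): both copy helpers return the number of bytes that could NOT
 * be copied, so zero means success.  "struct foo" and "ubuf" are
 * placeholder names:
 *
 *        struct foo kbuf;
 *
 *        if (copy_from_user(&kbuf, ubuf, sizeof(kbuf)))
 *                return -EFAULT;
 *        ...
 *        if (copy_to_user(ubuf, &kbuf, sizeof(kbuf)))
 *                return -EFAULT;
 */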

static __always_inline unsigned long
__copy_from_user(void *to, const void __user *from, unsigned long n)
{
        return __copy_user(to, (__force void *)from, n);
}

static __always_inline unsigned long __must_check
__copy_to_user(void __user *to, const void *from, unsigned long n)
{
        return __copy_user((__force void *)to, from, n);
}

#define __copy_to_user_inatomic   __copy_to_user
#define __copy_from_user_inatomic __copy_from_user

/*
 * Clear the area and return the remaining number of bytes
 * (i.e. the number of bytes NOT cleared on a fault; 0 on success).
 */
extern __kernel_size_t __clear_user(void *addr, __kernel_size_t size);

#define clear_user(addr,n) ({ \
        void *__cl_addr = (addr); \
        unsigned long __cl_size = (n); \
        if (__cl_size && __access_ok(((unsigned long)(__cl_addr)), __cl_size)) \
                __cl_size = __clear_user(__cl_addr, __cl_size); \
        __cl_size; })
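
/*
 * Illustrative sketch (editor's addition, not part of this patch):
 * zero "len" bytes of a user buffer, failing if any byte is left
 * unwritten.  "ubuf" and "len" are placeholder names:
 *
 *        if (clear_user(ubuf, len))
 *                return -EFAULT;
 */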

static __inline__ int
__strncpy_from_user(unsigned long __dest, unsigned long __user __src, int __count)
{
        __kernel_size_t res;
        unsigned long __dummy, _d, _s;

        __asm__ __volatile__(
                "9:\n"
                "mov.b @%2+, %1\n\t"
                "cmp/eq #0, %1\n\t"
                "bt/s 2f\n"
                "1:\n"
                "mov.b %1, @%3\n\t"
                "dt %7\n\t"
                "bf/s 9b\n\t"
                " add #1, %3\n\t"
                "2:\n\t"
                "sub %7, %0\n"
                "3:\n"
                ".section .fixup,\"ax\"\n"
                "4:\n\t"
                "mov.l 5f, %1\n\t"
                "jmp @%1\n\t"
                " mov %8, %0\n\t"
                ".balign 4\n"
                "5: .long 3b\n"
                ".previous\n"
                ".section __ex_table,\"a\"\n"
                " .balign 4\n"
                " .long 9b,4b\n"
                ".previous"
                : "=r" (res), "=&z" (__dummy), "=r" (_s), "=r" (_d)
                : "0" (__count), "2" (__src), "3" (__dest), "r" (__count),
                  "i" (-EFAULT)
                : "memory", "t");

        return res;
}

#define strncpy_from_user(dest,src,count) ({ \
        unsigned long __sfu_src = (unsigned long)(src); \
        int __sfu_count = (int)(count); \
        long __sfu_res = -EFAULT; \
        if (__access_ok(__sfu_src, __sfu_count)) \
                __sfu_res = __strncpy_from_user((unsigned long)(dest), \
                                __sfu_src, __sfu_count); \
        __sfu_res; })
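
/*
 * Illustrative usage sketch (editor's addition, not part of this
 * patch): a negative return signals a fault; as with strncpy(), the
 * result is not NUL-terminated if the buffer fills up.  "uname" is a
 * placeholder user pointer:
 *
 *        char name[32];
 *        long len = strncpy_from_user(name, uname, sizeof(name));
 *
 *        if (len < 0)
 *                return len;
 */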

/*
 * Return the size of a string (including the ending 0!)
 */
static __inline__ long __strnlen_user(const char __user *__s, long __n)
{
        unsigned long res;
        unsigned long __dummy;

        __asm__ __volatile__(
                "9:\n"
                "cmp/eq %4, %0\n\t"
                "bt 2f\n"
                "1:\t"
                "mov.b @(%0,%3), %1\n\t"
                "tst %1, %1\n\t"
                "bf/s 9b\n\t"
                " add #1, %0\n"
                "2:\n"
                ".section .fixup,\"ax\"\n"
                "3:\n\t"
                "mov.l 4f, %1\n\t"
                "jmp @%1\n\t"
                " mov #0, %0\n"
                ".balign 4\n"
                "4: .long 2b\n"
                ".previous\n"
                ".section __ex_table,\"a\"\n"
                " .balign 4\n"
                " .long 1b,3b\n"
                ".previous"
                : "=z" (res), "=&r" (__dummy)
                : "0" (0), "r" (__s), "r" (__n)
                : "t");
        return res;
}

static __inline__ long strnlen_user(const char __user *s, long n)
{
        if (!__addr_ok(s))
                return 0;
        else
                return __strnlen_user(s, n);
}

#define strlen_user(str) strnlen_user(str, ~0UL >> 1)
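
/*
 * Clarifying example (editor's addition): unlike strlen(), the count
 * includes the terminating NUL, so for the user string "ab":
 *
 *        strnlen_user(s, 16) returns 3
 *
 * and 0 is returned on a fault or a bad address, not a negative errno.
 */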

/*
 * The exception table consists of pairs of addresses: the first is the
 * address of an instruction that is allowed to fault, and the second is
 * the address at which the program should continue.  No registers are
 * modified, so it is entirely up to the continuation code to figure out
 * what to do.
 *
 * All the routines above use bits of fixup code that are out of line
 * with the main instruction path.  This means when everything is well,
 * we don't even have to jump over them.  Further, they do not intrude
 * on our cache or tlb entries.
 */

struct exception_table_entry
{
        unsigned long insn, fixup;
};

extern int fixup_exception(struct pt_regs *regs);
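
/*
 * Illustrative sketch (editor's addition, not part of this patch) of
 * how the fault path is expected to consume the table; the real
 * fixup_exception() lives in the arch fault-handling code:
 *
 *        const struct exception_table_entry *e;
 *
 *        e = search_exception_tables(regs->pc);
 *        if (e) {
 *                regs->pc = e->fixup;
 *                return 1;
 *        }
 *        return 0;
 */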

#endif /* __ASM_SH_UACCESS_H */