author	Glauber Costa <gcosta@redhat.com>	2008-06-13 13:39:25 -0400
committer	Ingo Molnar <mingo@elte.hu>	2008-07-09 03:14:18 -0400
commit	ca23386216b9d4fc3bb211101205077d2b2916ae (patch)
tree	258a4239a07f42da5c6b7d468b75eedcd962cba2 /include/asm-x86/uaccess_32.h
parent	be9d06bfd48934fbd56ccb7476eabccfa31b4afe (diff)
x86: merge common parts of uaccess.
Common parts of uaccess_32.h and uaccess_64.h are put in uaccess.h. The bits of uaccess_32.h and uaccess_64.h that move into this file are identical except for comment and whitespace differences.

Signed-off-by: Glauber Costa <gcosta@redhat.com>
Signed-off-by: H. Peter Anvin <hpa@zytor.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'include/asm-x86/uaccess_32.h')
-rw-r--r--	include/asm-x86/uaccess_32.h	| 110
1 file changed, 0 insertions, 110 deletions
diff --git a/include/asm-x86/uaccess_32.h b/include/asm-x86/uaccess_32.h
index 2676b48ac0fa..92ad19e70989 100644
--- a/include/asm-x86/uaccess_32.h
+++ b/include/asm-x86/uaccess_32.h
@@ -11,29 +11,6 @@
 #include <asm/asm.h>
 #include <asm/page.h>
 
-#define VERIFY_READ 0
-#define VERIFY_WRITE 1
-
-/*
- * The fs value determines whether argument validity checking should be
- * performed or not.  If get_fs() == USER_DS, checking is performed, with
- * get_fs() == KERNEL_DS, checking is bypassed.
- *
- * For historical reasons, these macros are grossly misnamed.
- */
-
-#define MAKE_MM_SEG(s)	((mm_segment_t) { (s) })
-
-
-#define KERNEL_DS	MAKE_MM_SEG(-1UL)
-#define USER_DS		MAKE_MM_SEG(PAGE_OFFSET)
-
-#define get_ds()	(KERNEL_DS)
-#define get_fs()	(current_thread_info()->addr_limit)
-#define set_fs(x)	(current_thread_info()->addr_limit = (x))
-
-#define segment_eq(a, b)	((a).seg == (b).seg)
-
 /*
  * movsl can be slow when source and dest are not both 8-byte aligned
  */
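
The hunk above removes the 32-bit addr_limit machinery: USER_DS caps user pointers at PAGE_OFFSET, while KERNEL_DS raises the limit to -1UL, which is why set_fs(KERNEL_DS) effectively bypasses the check. A minimal C sketch of the idea, assuming a hypothetical addr_ok() helper and the usual i386 3G/1G split as a stand-in for PAGE_OFFSET (the kernel itself expresses this check through the __range_not_ok() macro removed in the next hunk):

/* Sketch: how the addr_limit segment gates a user-pointer check. */
typedef struct { unsigned long seg; } mm_segment_t;

#define PAGE_OFFSET	0xC0000000UL		/* stand-in for the asm/page.h value */
#define MAKE_MM_SEG(s)	((mm_segment_t) { (s) })
#define KERNEL_DS	MAKE_MM_SEG(-1UL)	/* limit is ~0UL: nothing can fail */
#define USER_DS		MAKE_MM_SEG(PAGE_OFFSET)	/* user space ends here */

/* Hypothetical helper: with USER_DS only addresses below PAGE_OFFSET
 * pass; with KERNEL_DS the comparison can never fail. */
static inline int addr_ok(mm_segment_t limit, unsigned long addr)
{
	return addr < limit.seg;
}
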
@@ -47,91 +24,6 @@ extern struct movsl_mask {
 	((unsigned long __force)(addr) <		\
 	 (current_thread_info()->addr_limit.seg))
 
-/*
- * Test whether a block of memory is a valid user space address.
- * Returns 0 if the range is valid, nonzero otherwise.
- *
- * This is equivalent to the following test:
- * (u33)addr + (u33)size >= (u33)current->addr_limit.seg
- *
- * This needs 33-bit arithmetic. We have a carry...
- */
-#define __range_not_ok(addr, size)					\
-({									\
-	unsigned long flag, roksum;					\
-	__chk_user_ptr(addr);						\
-	asm("add %3,%1 ; sbb %0,%0; cmp %1,%4; sbb $0,%0"		\
-	    :"=&r" (flag), "=r" (roksum)				\
-	    :"1" (addr), "g" ((long)(size)),				\
-	     "rm" (current_thread_info()->addr_limit.seg));		\
-	flag;								\
-})
-
-/**
- * access_ok: - Checks if a user space pointer is valid
- * @type: Type of access: %VERIFY_READ or %VERIFY_WRITE.  Note that
- *        %VERIFY_WRITE is a superset of %VERIFY_READ - if it is safe
- *        to write to a block, it is always safe to read from it.
- * @addr: User space pointer to start of block to check
- * @size: Size of block to check
- *
- * Context: User context only.  This function may sleep.
- *
- * Checks if a pointer to a block of memory in user space is valid.
- *
- * Returns true (nonzero) if the memory block may be valid, false (zero)
- * if it is definitely invalid.
- *
- * Note that, depending on architecture, this function probably just
- * checks that the pointer is in the user space range - after calling
- * this function, memory access functions may still return -EFAULT.
- */
-#define access_ok(type, addr, size) (likely(__range_not_ok(addr, size) == 0))
-
-/*
- * The exception table consists of pairs of addresses: the first is the
- * address of an instruction that is allowed to fault, and the second is
- * the address at which the program should continue.  No registers are
- * modified, so it is entirely up to the continuation code to figure out
- * what to do.
- *
- * All the routines below use bits of fixup code that are out of line
- * with the main instruction path.  This means when everything is well,
- * we don't even have to jump over them.  Further, they do not intrude
- * on our cache or tlb entries.
- */
-
-struct exception_table_entry {
-	unsigned long insn, fixup;
-};
-
-extern int fixup_exception(struct pt_regs *regs);
-
-/*
- * These are the main single-value transfer routines.  They automatically
- * use the right size if we just have the right pointer type.
- *
- * This gets kind of ugly. We want to return _two_ values in "get_user()"
- * and yet we don't want to do any pointers, because that is too much
- * of a performance impact. Thus we have a few rather ugly macros here,
- * and hide all the ugliness from the user.
- *
- * The "__xxx" versions of the user access functions are versions that
- * do not verify the address space, that must have been done previously
- * with a separate "access_ok()" call (this is used when we do multiple
- * accesses to the same area of user memory).
- */
-
-extern void __get_user_1(void);
-extern void __get_user_2(void);
-extern void __get_user_4(void);
-
-#define __get_user_x(size, ret, x, ptr)				\
-	asm volatile("call __get_user_" #size			\
-		     :"=a" (ret),"=d" (x)			\
-		     :"0" (ptr))
-
-
 /* Careful: we have to cast the result to the type of the pointer
  * for sign reasons */
 
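
The __range_not_ok() asm deleted above implements the "33-bit" test from its comment with an add/sbb pair: the first sbb smears the carry out of addr + size into an all-ones flag, and the second folds in the borrow from comparing the sum against addr_limit.seg. The same logic in plain C, as a sketch (range_not_ok_c is a hypothetical name; it mirrors the asm's strict comparison, with the unsigned-overflow check standing in for the 33rd bit):

/* Nonzero iff [addr, addr + size) is not a valid user range. */
static unsigned long range_not_ok_c(unsigned long addr, unsigned long size,
				    unsigned long limit)
{
	unsigned long sum = addr + size;

	if (sum < addr)		/* carry set: the sum wrapped past 2^32 */
		return 1;
	return sum > limit;	/* the cmp/sbb fires when the sum exceeds the limit */
}

access_ok() is then just this test wrapped in likely(), true when the range check reports zero.
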
@@ -386,8 +278,6 @@ struct __large_struct { unsigned long buf[100]; };
 	__gu_err;						\
 })
 
-extern long __get_user_bad(void);
-
 #define __get_user_size(x, ptr, size, retval, errret)	\
 do {							\
 	retval = 0;					\
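
The exception-table comment removed in the second hunk describes pairs of addresses: an instruction that is allowed to fault, and the address where execution should continue. A rough sketch of the lookup that fixup_exception() must perform at trap time, assuming a simple linear scan (find_fixup and its parameters are hypothetical; the real kernel binary-searches a sorted table):

struct exception_table_entry {
	unsigned long insn, fixup;
};

/* Hypothetical lookup: if the faulting instruction address appears in
 * the table, return its fixup address so the trap handler can resume
 * there instead of killing the task; 0 means no fixup exists. */
static unsigned long find_fixup(const struct exception_table_entry *table,
				unsigned int entries, unsigned long fault_ip)
{
	unsigned int i;

	for (i = 0; i < entries; i++)
		if (table[i].insn == fault_ip)
			return table[i].fixup;
	return 0;
}

On a hit, fixup_exception() redirects the saved instruction pointer to the fixup address, which is how get_user() and friends turn a fault on a bad user pointer into a plain -EFAULT return.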