-rw-r--r--	include/asm-x86/uaccess.h	124
-rw-r--r--	include/asm-x86/uaccess_32.h	110
-rw-r--r--	include/asm-x86/uaccess_64.h	 83
3 files changed, 124 insertions(+), 193 deletions(-)
diff --git a/include/asm-x86/uaccess.h b/include/asm-x86/uaccess.h
index 9fefd2947e7..2fc30c2a8a9 100644
--- a/include/asm-x86/uaccess.h
+++ b/include/asm-x86/uaccess.h
@@ -1,5 +1,129 @@
+#ifndef _ASM_UACCES_H_
+#define _ASM_UACCES_H_
+/*
+ * User space memory access functions
+ */
+#include <linux/errno.h>
+#include <linux/compiler.h>
+#include <linux/thread_info.h>
+#include <linux/prefetch.h>
+#include <linux/string.h>
+#include <asm/asm.h>
+#include <asm/page.h>
+
+#define VERIFY_READ 0
+#define VERIFY_WRITE 1
+
+/*
+ * The fs value determines whether argument validity checking should be
+ * performed or not. If get_fs() == USER_DS, checking is performed, with
+ * get_fs() == KERNEL_DS, checking is bypassed.
+ *
+ * For historical reasons, these macros are grossly misnamed.
+ */
+
+#define MAKE_MM_SEG(s)	((mm_segment_t) { (s) })
+
+#define KERNEL_DS	MAKE_MM_SEG(-1UL)
+#define USER_DS		MAKE_MM_SEG(PAGE_OFFSET)
+
+#define get_ds()	(KERNEL_DS)
+#define get_fs()	(current_thread_info()->addr_limit)
+#define set_fs(x)	(current_thread_info()->addr_limit = (x))
+
+#define segment_eq(a, b)	((a).seg == (b).seg)
+
+/*
+ * Test whether a block of memory is a valid user space address.
+ * Returns 0 if the range is valid, nonzero otherwise.
+ *
+ * This is equivalent to the following test:
+ * (u33)addr + (u33)size >= (u33)current->addr_limit.seg (u65 for x86_64)
+ *
+ * This needs 33-bit (65-bit for x86_64) arithmetic. We have a carry...
+ */
+
+#define __range_not_ok(addr, size)					\
+({									\
+	unsigned long flag, roksum;					\
+	__chk_user_ptr(addr);						\
+	asm("add %3,%1 ; sbb %0,%0 ; cmp %1,%4 ; sbb $0,%0"		\
+	    : "=&r" (flag), "=r" (roksum)				\
+	    : "1" (addr), "g" ((long)(size)),				\
+	      "rm" (current_thread_info()->addr_limit.seg));		\
+	flag;								\
+})
+
+/**
+ * access_ok: - Checks if a user space pointer is valid
+ * @type: Type of access: %VERIFY_READ or %VERIFY_WRITE. Note that
+ *        %VERIFY_WRITE is a superset of %VERIFY_READ - if it is safe
+ *        to write to a block, it is always safe to read from it.
+ * @addr: User space pointer to start of block to check
+ * @size: Size of block to check
+ *
+ * Context: User context only. This function may sleep.
+ *
+ * Checks if a pointer to a block of memory in user space is valid.
+ *
+ * Returns true (nonzero) if the memory block may be valid, false (zero)
+ * if it is definitely invalid.
+ *
+ * Note that, depending on architecture, this function probably just
+ * checks that the pointer is in the user space range - after calling
+ * this function, memory access functions may still return -EFAULT.
+ */
+#define access_ok(type, addr, size) (likely(__range_not_ok(addr, size) == 0))
+
+/*
+ * The exception table consists of pairs of addresses: the first is the
+ * address of an instruction that is allowed to fault, and the second is
+ * the address at which the program should continue. No registers are
+ * modified, so it is entirely up to the continuation code to figure out
+ * what to do.
+ *
+ * All the routines below use bits of fixup code that are out of line
+ * with the main instruction path. This means when everything is well,
+ * we don't even have to jump over them. Further, they do not intrude
+ * on our cache or tlb entries.
+ */
+
+struct exception_table_entry {
+	unsigned long insn, fixup;
+};
+
+extern int fixup_exception(struct pt_regs *regs);
+
+/*
+ * These are the main single-value transfer routines. They automatically
+ * use the right size if we just have the right pointer type.
+ *
+ * This gets kind of ugly. We want to return _two_ values in "get_user()"
+ * and yet we don't want to do any pointers, because that is too much
+ * of a performance impact. Thus we have a few rather ugly macros here,
+ * and hide all the ugliness from the user.
+ *
+ * The "__xxx" versions of the user access functions are versions that
+ * do not verify the address space, that must have been done previously
+ * with a separate "access_ok()" call (this is used when we do multiple
+ * accesses to the same area of user memory).
+ */
+
+extern int __get_user_1(void);
+extern int __get_user_2(void);
+extern int __get_user_4(void);
+extern int __get_user_8(void);
+extern int __get_user_bad(void);
+
+#define __get_user_x(size, ret, x, ptr)			\
+	asm volatile("call __get_user_" #size		\
+		     : "=a" (ret),"=d" (x)		\
+		     : "0" (ptr))			\
+
 #ifdef CONFIG_X86_32
 # include "uaccess_32.h"
 #else
 # include "uaccess_64.h"
 #endif
+
+#endif
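
For reference, a minimal C sketch (not part of the patch) of what the add/sbb/cmp/sbb sequence in the unified __range_not_ok() computes; range_not_ok() and addr_limit_seg are made-up stand-ins for the macro and for current_thread_info()->addr_limit.seg. The asm folds the carry of the addition into the comparison, which is what the "33-bit (65-bit) arithmetic" comment refers to.

#include <stdbool.h>

/* Hypothetical stand-in for current_thread_info()->addr_limit.seg. */
static unsigned long addr_limit_seg;

/* Returns true when [addr, addr + size) is NOT an acceptable user range,
 * mirroring what the instruction sequence above computes. */
static bool range_not_ok(unsigned long addr, unsigned long size)
{
	unsigned long sum = addr + size;

	/* Carry out of the addition: the block wraps past the top of the
	 * address space, so it cannot be valid.  This carry is the extra
	 * 33rd/65th bit the comment talks about. */
	if (sum < addr)
		return true;

	/* Otherwise the block is rejected once its end runs past the
	 * current address limit. */
	return sum > addr_limit_seg;
}
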
diff --git a/include/asm-x86/uaccess_32.h b/include/asm-x86/uaccess_32.h
index 2676b48ac0f..92ad19e7098 100644
--- a/include/asm-x86/uaccess_32.h
+++ b/include/asm-x86/uaccess_32.h
@@ -11,29 +11,6 @@
 #include <asm/asm.h>
 #include <asm/page.h>
 
-#define VERIFY_READ 0
-#define VERIFY_WRITE 1
-
-/*
- * The fs value determines whether argument validity checking should be
- * performed or not. If get_fs() == USER_DS, checking is performed, with
- * get_fs() == KERNEL_DS, checking is bypassed.
- *
- * For historical reasons, these macros are grossly misnamed.
- */
-
-#define MAKE_MM_SEG(s)	((mm_segment_t) { (s) })
-
-
-#define KERNEL_DS	MAKE_MM_SEG(-1UL)
-#define USER_DS		MAKE_MM_SEG(PAGE_OFFSET)
-
-#define get_ds()	(KERNEL_DS)
-#define get_fs()	(current_thread_info()->addr_limit)
-#define set_fs(x)	(current_thread_info()->addr_limit = (x))
-
-#define segment_eq(a, b)	((a).seg == (b).seg)
-
 /*
  * movsl can be slow when source and dest are not both 8-byte aligned
  */
@@ -47,91 +24,6 @@ extern struct movsl_mask {
 	((unsigned long __force)(addr) < \
 	 (current_thread_info()->addr_limit.seg))
 
-/*
- * Test whether a block of memory is a valid user space address.
- * Returns 0 if the range is valid, nonzero otherwise.
- *
- * This is equivalent to the following test:
- * (u33)addr + (u33)size >= (u33)current->addr_limit.seg
- *
- * This needs 33-bit arithmetic. We have a carry...
- */
-#define __range_not_ok(addr, size)				\
-({								\
-	unsigned long flag, roksum;				\
-	__chk_user_ptr(addr);					\
-	asm("add %3,%1 ; sbb %0,%0; cmp %1,%4; sbb $0,%0"	\
-	    :"=&r" (flag), "=r" (roksum)			\
-	    :"1" (addr), "g" ((long)(size)),			\
-	    "rm" (current_thread_info()->addr_limit.seg));	\
-	flag;							\
-})
-
-/**
- * access_ok: - Checks if a user space pointer is valid
- * @type: Type of access: %VERIFY_READ or %VERIFY_WRITE. Note that
- *        %VERIFY_WRITE is a superset of %VERIFY_READ - if it is safe
- *        to write to a block, it is always safe to read from it.
- * @addr: User space pointer to start of block to check
- * @size: Size of block to check
- *
- * Context: User context only. This function may sleep.
- *
- * Checks if a pointer to a block of memory in user space is valid.
- *
- * Returns true (nonzero) if the memory block may be valid, false (zero)
- * if it is definitely invalid.
- *
- * Note that, depending on architecture, this function probably just
- * checks that the pointer is in the user space range - after calling
- * this function, memory access functions may still return -EFAULT.
- */
-#define access_ok(type, addr, size) (likely(__range_not_ok(addr, size) == 0))
-
-/*
- * The exception table consists of pairs of addresses: the first is the
- * address of an instruction that is allowed to fault, and the second is
- * the address at which the program should continue. No registers are
- * modified, so it is entirely up to the continuation code to figure out
- * what to do.
- *
- * All the routines below use bits of fixup code that are out of line
- * with the main instruction path. This means when everything is well,
- * we don't even have to jump over them. Further, they do not intrude
- * on our cache or tlb entries.
- */
-
-struct exception_table_entry {
-	unsigned long insn, fixup;
-};
-
-extern int fixup_exception(struct pt_regs *regs);
-
-/*
- * These are the main single-value transfer routines. They automatically
- * use the right size if we just have the right pointer type.
- *
- * This gets kind of ugly. We want to return _two_ values in "get_user()"
- * and yet we don't want to do any pointers, because that is too much
- * of a performance impact. Thus we have a few rather ugly macros here,
- * and hide all the ugliness from the user.
- *
- * The "__xxx" versions of the user access functions are versions that
- * do not verify the address space, that must have been done previously
- * with a separate "access_ok()" call (this is used when we do multiple
- * accesses to the same area of user memory).
- */
-
-extern void __get_user_1(void);
-extern void __get_user_2(void);
-extern void __get_user_4(void);
-
-#define __get_user_x(size, ret, x, ptr)		\
-	asm volatile("call __get_user_" #size	\
-		     :"=a" (ret),"=d" (x)	\
-		     :"0" (ptr))
-
-
 /* Careful: we have to cast the result to the type of the pointer
  * for sign reasons */
 
@@ -386,8 +278,6 @@ struct __large_struct { unsigned long buf[100]; };
 	__gu_err;							\
 })
 
-extern long __get_user_bad(void);
-
 #define __get_user_size(x, ptr, size, retval, errret)			\
 do {									\
 	retval = 0;							\
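
The comments removed from uaccess_32.h (and now kept once in uaccess.h) describe the intended calling pattern for the unchecked "__xxx" accessors. A hedged usage sketch follows; struct user_pair and read_pair() are invented purely for illustration and are not part of the patch.

struct user_pair {
	int a;
	int b;
};

static int read_pair(struct user_pair __user *up, int *a, int *b)
{
	/* Validate the whole user block once... */
	if (!access_ok(VERIFY_READ, up, sizeof(*up)))
		return -EFAULT;

	/* ...then use the cheaper __get_user() variants, which skip the
	 * range check but still return -EFAULT through the exception
	 * table machinery if the page turns out not to be mapped. */
	if (__get_user(*a, &up->a))
		return -EFAULT;
	if (__get_user(*b, &up->b))
		return -EFAULT;

	return 0;
}
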
diff --git a/include/asm-x86/uaccess_64.h b/include/asm-x86/uaccess_64.h
index 3a81775136c..243dbb467f3 100644
--- a/include/asm-x86/uaccess_64.h
+++ b/include/asm-x86/uaccess_64.h
@@ -9,88 +9,11 @@
 #include <linux/prefetch.h>
 #include <asm/page.h>
 
-#define VERIFY_READ 0
-#define VERIFY_WRITE 1
-
-/*
- * The fs value determines whether argument validity checking should be
- * performed or not. If get_fs() == USER_DS, checking is performed, with
- * get_fs() == KERNEL_DS, checking is bypassed.
- *
- * For historical reasons, these macros are grossly misnamed.
- */
-
-#define MAKE_MM_SEG(s)	((mm_segment_t) { (s) })
-
-#define KERNEL_DS	MAKE_MM_SEG(-1UL)
-#define USER_DS		MAKE_MM_SEG(PAGE_OFFSET)
-
-#define get_ds()	(KERNEL_DS)
-#define get_fs()	(current_thread_info()->addr_limit)
-#define set_fs(x)	(current_thread_info()->addr_limit = (x))
-
-#define segment_eq(a, b)	((a).seg == (b).seg)
-
 #define __addr_ok(addr) (!((unsigned long)(addr) &		\
 	(current_thread_info()->addr_limit.seg)))
 
-/*
- * Uhhuh, this needs 65-bit arithmetic. We have a carry..
- */
-#define __range_not_ok(addr, size)					\
-({									\
-	unsigned long flag, roksum;					\
-	__chk_user_ptr(addr);						\
-	asm("add %3,%1 ; sbb %0,%0 ; cmp %1,%4 ; sbb $0,%0"		\
-	    : "=&r" (flag), "=r" (roksum)				\
-	    : "1" (addr), "g" ((long)(size)),				\
-	      "rm" (current_thread_info()->addr_limit.seg));		\
-	flag;								\
-})
-
-#define access_ok(type, addr, size) (likely(__range_not_ok(addr, size) == 0))
-
-/*
- * The exception table consists of pairs of addresses: the first is the
- * address of an instruction that is allowed to fault, and the second is
- * the address at which the program should continue. No registers are
- * modified, so it is entirely up to the continuation code to figure out
- * what to do.
- *
- * All the routines below use bits of fixup code that are out of line
- * with the main instruction path. This means when everything is well,
- * we don't even have to jump over them. Further, they do not intrude
- * on our cache or tlb entries.
- */
-
-struct exception_table_entry {
-	unsigned long insn, fixup;
-};
-
-extern int fixup_exception(struct pt_regs *regs);
-
 #define ARCH_HAS_SEARCH_EXTABLE
 
-/*
- * These are the main single-value transfer routines. They automatically
- * use the right size if we just have the right pointer type.
- *
- * This gets kind of ugly. We want to return _two_ values in "get_user()"
- * and yet we don't want to do any pointers, because that is too much
- * of a performance impact. Thus we have a few rather ugly macros here,
- * and hide all the ugliness from the user.
- *
- * The "__xxx" versions of the user access functions are versions that
- * do not verify the address space, that must have been done previously
- * with a separate "access_ok()" call (this is used when we do multiple
- * accesses to the same area of user memory).
- */
-
-#define __get_user_x(size, ret, x, ptr)			\
-	asm volatile("call __get_user_" #size		\
-		     : "=a" (ret),"=d" (x)		\
-		     : "0" (ptr))			\
-
 /* Careful: we have to cast the result to the type of the pointer
  * for sign reasons */
 
@@ -226,12 +149,6 @@ struct __large_struct { unsigned long buf[100]; };
 	__gu_err;							\
 })
 
-extern int __get_user_1(void);
-extern int __get_user_2(void);
-extern int __get_user_4(void);
-extern int __get_user_8(void);
-extern int __get_user_bad(void);
-
 #define __get_user_size(x, ptr, size, retval)				\
 do {									\
 	retval = 0;							\
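
Finally, the exception-table comment kept in uaccess.h is easier to follow with a sketch of the consumer side. This is a simplified, hedged outline of what fixup_exception() does on x86; the real function lives in the arch fault-handling code and differs in detail.

int fixup_exception(struct pt_regs *regs)
{
	const struct exception_table_entry *fixup;

	/* Look up the faulting instruction address in the sorted table of
	 * (insn, fixup) pairs emitted next to each fault-capable access. */
	fixup = search_exception_tables(regs->ip);
	if (fixup) {
		/* A whitelisted user access faulted: resume at the fixup
		 * code, which typically sets the -EFAULT return value. */
		regs->ip = fixup->fixup;
		return 1;
	}

	/* No fixup registered: this is a genuine kernel fault. */
	return 0;
}
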