author     Daniel Thompson <daniel.thompson@linaro.org>   2014-07-10 15:58:08 -0400
committer  Russell King <rmk+kernel@arm.linux.org.uk>     2014-07-18 07:29:34 -0400
commit     e38361d032f12f42ddc6d8e2028f6668da696d14 (patch)
tree       43b60217fbff7e9edc93f6466647548e5e5b8da3
parent     bc994c77ce82576209dcf08f71de9ae51b0b100f (diff)
ARM: 8091/2: add get_user() support for 8 byte types
Recent contributions, including to DRM and binder, introduce 64-bit
values in their interfaces. A common motivation for this is to allow
the same ABI for 32- and 64-bit userspaces (and therefore also a shared
ABI for 32/64 hybrid userspaces). Either way, the developers would like
to avoid gotchas such as having to fall back on copy_from_user() just to
read a single 64-bit value.
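By way of illustration only (this sketch is not part of the commit), the
sort of caller this enables on 32-bit ARM; the structure, field and
function names below are hypothetical:

    #include <linux/errno.h>
    #include <linux/types.h>
    #include <linux/uaccess.h>

    /* Hypothetical ioctl argument shared by 32- and 64-bit userspace. */
    struct demo_submit {
            __u64 handle;           /* 64 bits wide on every ABI */
    };

    static int demo_get_handle(struct demo_submit __user *arg, u64 *out)
    {
            u64 handle;

            /*
             * With __get_user_8 available, get_user() now works for
             * 8-byte types on ARM; previously this read would have
             * needed copy_from_user().
             */
            if (get_user(handle, &arg->handle))
                    return -EFAULT;

            *out = handle;
            return 0;
    }

On a faulting address get_user() returns -EFAULT and zeroes the
destination, matching the r2/r3 clearing done by __get_user_bad and
__get_user_bad8 in the diff below.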
This feature is already implemented on x86-32 and the majority of other
32-bit architectures. The current list of hold-out architectures without
get_user() support for 8-byte types is: arm, avr32, blackfin, m32r,
metag, microblaze, mn10300 and sh.
Credit:
My name sits rather uneasily at the top of this patch. The v1 and
v2 versions of the patch were written by Rob Clark and to produce v4
I mostly copied code from Russell King and H. Peter Anvin. However, I
have mangled the patch sufficiently that *blame* is rightfully mine,
even if credit should be more widely shared.
Changelog:
v5: updated to use the ret macro (requested by Russell King)
v4: removed an inlined add on big-endian systems (spotted by Russell King),
used __ARMEB__ rather than BIG_ENDIAN (to match the rest of the file),
cleared r3 on EFAULT during __get_user_8.
v3: fix a couple of checkpatch issues
v2: pass the correct size to check_uaccess, and handle the narrowing
double-word read better with __get_user_xb() (Russell King's suggestion)
v1: original
Signed-off-by: Rob Clark <robdclark@gmail.com>
Signed-off-by: Daniel Thompson <daniel.thompson@linaro.org>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
-rw-r--r--   arch/arm/include/asm/uaccess.h   20
-rw-r--r--   arch/arm/lib/getuser.S           37
2 files changed, 55 insertions(+), 2 deletions(-)
diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h
index 75d95799b6e6..7057cf8b87d0 100644
--- a/arch/arm/include/asm/uaccess.h
+++ b/arch/arm/include/asm/uaccess.h
@@ -107,6 +107,8 @@ static inline void set_fs(mm_segment_t fs)
 extern int __get_user_1(void *);
 extern int __get_user_2(void *);
 extern int __get_user_4(void *);
+extern int __get_user_lo8(void *);
+extern int __get_user_8(void *);
 
 #define __GUP_CLOBBER_1	"lr", "cc"
 #ifdef CONFIG_CPU_USE_DOMAINS
@@ -115,6 +117,8 @@ extern int __get_user_4(void *);
 #define __GUP_CLOBBER_2	"lr", "cc"
 #endif
 #define __GUP_CLOBBER_4	"lr", "cc"
+#define __GUP_CLOBBER_lo8	"lr", "cc"
+#define __GUP_CLOBBER_8	"lr", "cc"
 
 #define __get_user_x(__r2,__p,__e,__l,__s)				\
 	   __asm__ __volatile__ (					\
@@ -125,11 +129,19 @@ extern int __get_user_4(void *);
 		: "0" (__p), "r" (__l)					\
 		: __GUP_CLOBBER_##__s)
 
+/* narrowing a double-word get into a single 32bit word register: */
+#ifdef __ARMEB__
+#define __get_user_xb(__r2, __p, __e, __l, __s)				\
+	__get_user_x(__r2, __p, __e, __l, lo8)
+#else
+#define __get_user_xb __get_user_x
+#endif
+
 #define __get_user_check(x,p)						\
 	({								\
 		unsigned long __limit = current_thread_info()->addr_limit - 1; \
 		register const typeof(*(p)) __user *__p asm("r0") = (p);\
-		register unsigned long __r2 asm("r2");			\
+		register typeof(x) __r2 asm("r2");			\
 		register unsigned long __l asm("r1") = __limit;		\
 		register int __e asm("r0");				\
 		switch (sizeof(*(__p))) {				\
@@ -142,6 +154,12 @@ extern int __get_user_4(void *);
 		case 4:							\
 			__get_user_x(__r2, __p, __e, __l, 4);		\
 			break;						\
+		case 8:							\
+			if (sizeof((x)) < 8)				\
+				__get_user_xb(__r2, __p, __e, __l, 4);	\
+			else						\
+				__get_user_x(__r2, __p, __e, __l, 8);	\
+			break;						\
 		default: __e = __get_user_bad(); break;			\
 		}							\
 		x = (typeof(*(p))) __r2;				\
diff --git a/arch/arm/lib/getuser.S b/arch/arm/lib/getuser.S
index 0f958e3d8180..938600098b88 100644
--- a/arch/arm/lib/getuser.S
+++ b/arch/arm/lib/getuser.S
@@ -18,7 +18,7 @@
  * Inputs:	r0 contains the address
  *		r1 contains the address limit, which must be preserved
  * Outputs:	r0 is the error code
- *		r2 contains the zero-extended value
+ *		r2, r3 contains the zero-extended value
  *		lr corrupted
  *
  * No other registers must be altered. (see <asm/uaccess.h>
@@ -66,15 +66,50 @@ ENTRY(__get_user_4)
 	ret	lr
 ENDPROC(__get_user_4)
 
+ENTRY(__get_user_8)
+	check_uaccess r0, 8, r1, r2, __get_user_bad
+#ifdef CONFIG_THUMB2_KERNEL
+5: TUSER(ldr)	r2, [r0]
+6: TUSER(ldr)	r3, [r0, #4]
+#else
+5: TUSER(ldr)	r2, [r0], #4
+6: TUSER(ldr)	r3, [r0]
+#endif
+	mov	r0, #0
+	ret	lr
+ENDPROC(__get_user_8)
+
+#ifdef __ARMEB__
+ENTRY(__get_user_lo8)
+	check_uaccess r0, 8, r1, r2, __get_user_bad
+#ifdef CONFIG_CPU_USE_DOMAINS
+	add	r0, r0, #4
+7: ldrt	r2, [r0]
+#else
+7: ldr	r2, [r0, #4]
+#endif
+	mov	r0, #0
+	ret	lr
+ENDPROC(__get_user_lo8)
+#endif
+
+__get_user_bad8:
+	mov	r3, #0
 __get_user_bad:
 	mov	r2, #0
 	mov	r0, #-EFAULT
 	ret	lr
 ENDPROC(__get_user_bad)
+ENDPROC(__get_user_bad8)
 
 .pushsection __ex_table, "a"
 	.long	1b, __get_user_bad
 	.long	2b, __get_user_bad
 	.long	3b, __get_user_bad
 	.long	4b, __get_user_bad
+	.long	5b, __get_user_bad8
+	.long	6b, __get_user_bad8
+#ifdef __ARMEB__
+	.long	7b, __get_user_bad
+#endif
 .popsection
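
Editorial note, not part of the patch: a minimal sketch of what the new
case 8 branch in __get_user_check() means for callers. When the
destination is as wide as the source, both words are fetched (r2/r3);
when the destination is narrower, __get_user_xb() fetches only the low
32 bits of the double word, which live at offset 0 on little-endian ARM
and at offset 4 on big-endian ARM (hence __get_user_lo8). The structure
and function names below are hypothetical.

    #include <linux/errno.h>
    #include <linux/types.h>
    #include <linux/uaccess.h>

    /* Hypothetical user-supplied structure with a 64-bit field. */
    struct demo_arg {
            __u64 value;
    };

    static int demo_read(struct demo_arg __user *uarg)
    {
            u64 full;       /* destination as wide as the field */
            u32 low;        /* narrower destination */

            /* case 8, sizeof(x) == 8: __get_user_8 loads both words. */
            if (get_user(full, &uarg->value))
                    return -EFAULT; /* on fault, 'full' is zeroed */

            /*
             * case 8, sizeof(x) < 8: __get_user_xb() loads only the low
             * word -- via __get_user_4 on little-endian, __get_user_lo8
             * on big-endian -- so 'low' ends up equal to (u32)full on
             * either endianness.
             */
            if (get_user(low, &uarg->value))
                    return -EFAULT;

            /* Sanity check purely for illustration. */
            return (low == (u32)full) ? 0 : -EINVAL;
    }

Since __get_user_lo8 only writes r2, its exception-table entry points at
__get_user_bad rather than __get_user_bad8, which is why only 5b and 6b
are routed through the r3-clearing label.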