aboutsummaryrefslogtreecommitdiffstats
path: root/include/asm-x86/uaccess.h
diff options
context:
space:
mode:
authorGlauber Costa <gcosta@redhat.com>2008-06-25 10:48:29 -0400
committerIngo Molnar <mingo@elte.hu>2008-07-09 03:14:22 -0400
commitdc70ddf4098de043ac52f623c7573a11f2ae3d09 (patch)
tree0641f33c243a7d1feec051670cdced9ffec36535 /include/asm-x86/uaccess.h
parentd42e6af613375be7a9a431628ecd742e87230554 (diff)
x86: merge __put_user_asm and its user.
Move both __put_user_asm and __put_user_size to uaccess.h. i386 already had a special function for 64-bit access, so for x86_64, we just define a macro with the same name. Note that for X86_64, CONFIG_X86_WP_WORKS_OK will always be defined, so the #else part will never be even compiled in. Signed-off-by: Glauber Costa <gcosta@redhat.com> Signed-off-by: H. Peter Anvin <hpa@zytor.com> Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'include/asm-x86/uaccess.h')
-rw-r--r--include/asm-x86/uaccess.h84
1 files changed, 84 insertions, 0 deletions
diff --git a/include/asm-x86/uaccess.h b/include/asm-x86/uaccess.h
index 8a1e45fdc980..bcda5d075921 100644
--- a/include/asm-x86/uaccess.h
+++ b/include/asm-x86/uaccess.h
@@ -178,6 +178,90 @@ extern int __get_user_bad(void);
178	__ret_gu;						\
179})
180
#ifdef CONFIG_X86_32
/*
 * Store a 64-bit value to userspace on 32-bit x86, where no single
 * instruction can move 64 bits.  The "A" constraint pins x to the
 * edx:eax register pair, which is written with two 32-bit stores.
 * Each store has its own exception-table entry; a fault on either one
 * jumps to the fixup at 4:, which loads -EFAULT into err and resumes
 * at 3: (after the stores).  On success err is left at its incoming
 * value (the "0" matching constraint).
 */
#define __put_user_u64(x, addr, err)					\
	asm volatile("1:	movl %%eax,0(%2)\n"			\
		     "2:	movl %%edx,4(%2)\n"			\
		     "3:\n"						\
		     ".section .fixup,\"ax\"\n"				\
		     "4:	movl %3,%0\n"				\
		     "	jmp 3b\n"					\
		     ".previous\n"					\
		     _ASM_EXTABLE(1b, 4b)				\
		     _ASM_EXTABLE(2b, 4b)				\
		     : "=r" (err)					\
		     : "A" (x), "r" (addr), "i" (-EFAULT), "0" (err))
#else
/*
 * On x86_64 a 64-bit store is just a plain quadword mov, so reuse the
 * generic __put_user_asm helper with the "q" size suffix.
 */
#define __put_user_u64(x, ptr, retval) \
	__put_user_asm(x, ptr, retval, "q", "", "Zr", -EFAULT)
#endif
198
#ifdef CONFIG_X86_WP_WORKS_OK

/*
 * Dispatch a userspace store to the correctly-sized asm helper.
 * retval is cleared up front and only overwritten (with errret) by the
 * fixup path inside the helper if the store faults.  A size that is
 * not 1/2/4/8 is a build-time bug, diagnosed via __put_user_bad().
 */
#define __put_user_size(x, ptr, size, retval, errret)			\
do {									\
	retval = 0;							\
	__chk_user_ptr(ptr);						\
	switch (size) {							\
	case 1:								\
		__put_user_asm(x, ptr, retval, "b", "b", "iq", errret);	\
		break;							\
	case 2:								\
		__put_user_asm(x, ptr, retval, "w", "w", "ir", errret);	\
		break;							\
	case 4:								\
		__put_user_asm(x, ptr, retval, "l", "k", "ir", errret);\
		break;							\
	case 8:								\
		__put_user_u64((__typeof__(*ptr))(x), ptr, retval);	\
		break;							\
	default:							\
		__put_user_bad();					\
	}								\
} while (0)

#else

/*
 * CPUs where the WP bit does not work in supervisor mode (i386) cannot
 * rely on a direct store honoring page protections, so fall back to
 * __copy_to_user_ll(), which performs the checks in software.
 */
#define __put_user_size(x, ptr, size, retval, errret)			\
do {									\
	__typeof__(*(ptr))__pus_tmp = x;				\
	retval = 0;							\
									\
	if (unlikely(__copy_to_user_ll(ptr, &__pus_tmp, size) != 0))	\
		retval = errret;					\
} while (0)

#endif
235
/*
 * Store x to userspace without an access_ok() range check.
 * Evaluates to 0 on success or -EFAULT if the store faulted.
 */
#define __put_user_nocheck(x, ptr, size)			\
({								\
	long __pu_err;						\
	__put_user_size((x), (ptr), (size), __pu_err, -EFAULT);	\
	__pu_err;						\
})
242
243
244
/* FIXME: this hack is definitely wrong -AK */
/*
 * Dummy oversized type used so that an "m" constraint on __m(addr)
 * tells gcc the asm may touch a wide region at addr, without gcc
 * knowing (or caring about) the real object's type.
 */
struct __large_struct { unsigned long buf[100]; };
#define __m(x) (*(struct __large_struct __user *)(x))
248
/*
 * Tell gcc we read from memory instead of writing: this is because
 * we do not write to any memory gcc knows about, so there are no
 * aliasing issues.
 *
 * Emit a single sized store of x to addr, with an exception-table
 * fixup: if the mov at 1: faults, control resumes at 3:, which loads
 * errret into err and jumps back past the store.  itype/rtype select
 * the instruction suffix and register-name modifier for the operand
 * size; ltype is the gcc constraint used for x.
 */
#define __put_user_asm(x, addr, err, itype, rtype, ltype, errret) \
	asm volatile("1:	mov"itype" %"rtype"1,%2\n"	\
		     "2:\n"					\
		     ".section .fixup,\"ax\"\n"			\
		     "3:	mov %3,%0\n"			\
		     "	jmp 2b\n"				\
		     ".previous\n"				\
		     _ASM_EXTABLE(1b, 3b)			\
		     : "=r"(err)				\
		     : ltype(x), "m" (__m(addr)), "i" (errret), "0" (err))
264
181 265
182#ifdef CONFIG_X86_32 266#ifdef CONFIG_X86_32
183# include "uaccess_32.h" 267# include "uaccess_32.h"