aboutsummaryrefslogtreecommitdiffstats
path: root/include/asm-x86/uaccess_32.h
diff options
context:
space:
mode:
authorGlauber Costa <gcosta@redhat.com>2008-06-25 10:48:29 -0400
committerIngo Molnar <mingo@elte.hu>2008-07-09 03:14:22 -0400
commitdc70ddf4098de043ac52f623c7573a11f2ae3d09 (patch)
tree0641f33c243a7d1feec051670cdced9ffec36535 /include/asm-x86/uaccess_32.h
parentd42e6af613375be7a9a431628ecd742e87230554 (diff)
x86: merge __put_user_asm and its user.
Move both __put_user_asm and __put_user_size to uaccess.h. i386 already had a special function for 64-bit access, so for x86_64, we just define a macro with the same name. Note that for X86_64, CONFIG_X86_WP_WORKS_OK will always be defined, so the #else part will never be even compiled in. Signed-off-by: Glauber Costa <gcosta@redhat.com> Signed-off-by: H. Peter Anvin <hpa@zytor.com> Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'include/asm-x86/uaccess_32.h')
-rw-r--r--include/asm-x86/uaccess_32.h77
1 files changed, 0 insertions, 77 deletions
diff --git a/include/asm-x86/uaccess_32.h b/include/asm-x86/uaccess_32.h
index 4c47a5ba65e3..fab755781b9b 100644
--- a/include/asm-x86/uaccess_32.h
+++ b/include/asm-x86/uaccess_32.h
@@ -145,83 +145,6 @@ extern void __put_user_8(void);
145	#define __put_user(x, ptr) \
146		__put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
147
148#define __put_user_nocheck(x, ptr, size) \
149({ \
150 long __pu_err; \
151 __put_user_size((x), (ptr), (size), __pu_err, -EFAULT); \
152 __pu_err; \
153})
154
155
156#define __put_user_u64(x, addr, err) \
157 asm volatile("1: movl %%eax,0(%2)\n" \
158 "2: movl %%edx,4(%2)\n" \
159 "3:\n" \
160 ".section .fixup,\"ax\"\n" \
161 "4: movl %3,%0\n" \
162 " jmp 3b\n" \
163 ".previous\n" \
164 _ASM_EXTABLE(1b, 4b) \
165 _ASM_EXTABLE(2b, 4b) \
166 : "=r" (err) \
167 : "A" (x), "r" (addr), "i" (-EFAULT), "0" (err))
168
169#ifdef CONFIG_X86_WP_WORKS_OK
170
171#define __put_user_size(x, ptr, size, retval, errret) \
172do { \
173 retval = 0; \
174 __chk_user_ptr(ptr); \
175 switch (size) { \
176 case 1: \
177 __put_user_asm(x, ptr, retval, "b", "b", "iq", errret); \
178 break; \
179 case 2: \
180 __put_user_asm(x, ptr, retval, "w", "w", "ir", errret); \
181 break; \
182 case 4: \
183 __put_user_asm(x, ptr, retval, "l", "k", "ir", errret);\
184 break; \
185 case 8: \
186 __put_user_u64((__typeof__(*ptr))(x), ptr, retval); \
187 break; \
188 default: \
189 __put_user_bad(); \
190 } \
191} while (0)
192
193#else
194
195#define __put_user_size(x, ptr, size, retval, errret) \
196do { \
197 __typeof__(*(ptr))__pus_tmp = x; \
198 retval = 0; \
199 \
200 if (unlikely(__copy_to_user_ll(ptr, &__pus_tmp, size) != 0)) \
201 retval = errret; \
202} while (0)
203
204#endif
205struct __large_struct { unsigned long buf[100]; };
206#define __m(x) (*(struct __large_struct __user *)(x))
207
208/*
209 * Tell gcc we read from memory instead of writing: this is because
210 * we do not write to any memory gcc knows about, so there are no
211 * aliasing issues.
212 */
213#define __put_user_asm(x, addr, err, itype, rtype, ltype, errret) \
214 asm volatile("1: mov"itype" %"rtype"1,%2\n" \
215 "2:\n" \
216 ".section .fixup,\"ax\"\n" \
217 "3: movl %3,%0\n" \
218 " jmp 2b\n" \
219 ".previous\n" \
220 _ASM_EXTABLE(1b, 3b) \
221 : "=r"(err) \
222 : ltype (x), "m" (__m(addr)), "i" (errret), "0" (err))
223
224
225 148	#define __get_user_nocheck(x, ptr, size) \
226 149	({ \
227 150		long __gu_err; \