author		Joe Perches <joe@perches.com>	2008-03-23 04:03:49 -0400
committer	Ingo Molnar <mingo@elte.hu>	2008-04-17 11:41:28 -0400
commit		b896313e53344e79cc8bbc69f0a7d5c2b1735895 (patch)
tree		64ac8f35229223121e5589abdae6980be76013c3 /include
parent		b1fcec7f2296c4b9126e1b85b52494ac8910d528 (diff)
include/asm-x86/uaccess_64.h: checkpatch cleanups - formatting only
Signed-off-by: Joe Perches <joe@perches.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
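The whole patch is a mechanical application of a few checkpatch.pl rules. For orientation, here is the recurring before/after pattern on a made-up macro (copy_dispatch_*() and its do_copy_*() helpers are hypothetical, used only to illustrate; they are not part of the file):

/* Before: flagged by scripts/checkpatch.pl -- no space after commas,
 * "sizeof (x)" written with a space, and each case body crammed onto
 * its label line. */
#define copy_dispatch_old(x,ptr,err) \
	switch (sizeof (*(ptr))) { \
	case 1: do_copy_1((x),(ptr),(err)); break; \
	default: do_copy_bad(); break; \
	}

/* After: a space after every comma, sizeof(*(ptr)) with no space, and
 * one statement per line -- the same transformation applied throughout
 * the diff below. */
#define copy_dispatch_new(x, ptr, err) \
	switch (sizeof(*(ptr))) { \
	case 1: \
		do_copy_1((x), (ptr), (err)); \
		break; \
	default: \
		do_copy_bad(); \
		break; \
	}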
Diffstat (limited to 'include')
-rw-r--r--	include/asm-x86/uaccess_64.h	376
1 file changed, 227 insertions, 149 deletions
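One non-obvious spot in the reformatted code is the __range_not_ok() asm, whose "65-bit arithmetic" comment refers to the carry out of the 64-bit add addr + size. A C sketch of what its four instructions compute (illustrative only -- the kernel keeps this in asm so the out-of-range flag comes straight from the CPU carry flag; range_not_ok_c() is a hypothetical name):

/* C equivalent of the asm body of __range_not_ok(): returns nonzero
 * when [addr, addr + size) does not fit below the task's addr_limit. */
static inline unsigned long range_not_ok_c(unsigned long addr, long size,
					   unsigned long limit)
{
	unsigned long sum = addr + size;	/* addq %3,%1 */

	if (sum < addr)		/* the true sum needs 65 bits: carry set */
		return 1;	/* sbbq %0,%0 turns CF into the flag */
	return sum > limit;	/* cmpq %1,%4 ; sbbq $0,%0 */
}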
diff --git a/include/asm-x86/uaccess_64.h b/include/asm-x86/uaccess_64.h
index b87eb4ba8f9d..b8a2f4339903 100644
--- a/include/asm-x86/uaccess_64.h
+++ b/include/asm-x86/uaccess_64.h
@@ -29,23 +29,27 @@
 #define get_fs()	(current_thread_info()->addr_limit)
 #define set_fs(x)	(current_thread_info()->addr_limit = (x))
 
-#define segment_eq(a,b)	((a).seg == (b).seg)
+#define segment_eq(a, b)	((a).seg == (b).seg)
 
-#define __addr_ok(addr) (!((unsigned long)(addr) & (current_thread_info()->addr_limit.seg)))
+#define __addr_ok(addr) (!((unsigned long)(addr) &		\
+			   (current_thread_info()->addr_limit.seg)))
 
 /*
  * Uhhuh, this needs 65-bit arithmetic. We have a carry..
  */
-#define __range_not_ok(addr,size) ({ \
-	unsigned long flag,roksum; \
-	__chk_user_ptr(addr); \
-	asm("# range_ok\n\r" \
-		"addq %3,%1 ; sbbq %0,%0 ; cmpq %1,%4 ; sbbq $0,%0" \
-		:"=&r" (flag), "=r" (roksum) \
-		:"1" (addr),"g" ((long)(size)),"g" (current_thread_info()->addr_limit.seg)); \
-	flag; })
+#define __range_not_ok(addr, size)				\
+({								\
+	unsigned long flag, roksum;				\
+	__chk_user_ptr(addr);					\
+	asm("# range_ok\n\r"					\
+	    "addq %3,%1 ; sbbq %0,%0 ; cmpq %1,%4 ; sbbq $0,%0"	\
+	    : "=&r" (flag), "=r" (roksum)			\
+	    : "1" (addr), "g" ((long)(size)),			\
+	      "g" (current_thread_info()->addr_limit.seg));	\
+	flag;							\
+})
 
-#define access_ok(type, addr, size) (__range_not_ok(addr,size) == 0)
+#define access_ok(type, addr, size) (__range_not_ok(addr, size) == 0)
 
 /*
  * The exception table consists of pairs of addresses: the first is the
@@ -60,8 +64,7 @@
  * on our cache or tlb entries.
  */
 
-struct exception_table_entry
-{
+struct exception_table_entry {
 	unsigned long insn, fixup;
 };
 
@@ -84,23 +87,36 @@ extern int fixup_exception(struct pt_regs *regs);
  * accesses to the same area of user memory).
  */
 
-#define __get_user_x(size,ret,x,ptr) \
+#define __get_user_x(size, ret, x, ptr)			\
 	asm volatile("call __get_user_" #size \
-		:"=a" (ret),"=d" (x) \
-		:"c" (ptr) \
-		:"r8")
+		     : "=a" (ret),"=d" (x)	\
+		     : "c" (ptr)		\
+		     : "r8")
+
+/* Careful: we have to cast the result to the type of the pointer
+ * for sign reasons */
 
-/* Careful: we have to cast the result to the type of the pointer for sign reasons */
-#define get_user(x,ptr) \
-({	unsigned long __val_gu; \
+#define get_user(x, ptr)			\
+({						\
+	unsigned long __val_gu;			\
 	int __ret_gu; \
 	__chk_user_ptr(ptr); \
-	switch(sizeof (*(ptr))) { \
-	case 1: __get_user_x(1,__ret_gu,__val_gu,ptr); break; \
-	case 2: __get_user_x(2,__ret_gu,__val_gu,ptr); break; \
-	case 4: __get_user_x(4,__ret_gu,__val_gu,ptr); break; \
-	case 8: __get_user_x(8,__ret_gu,__val_gu,ptr); break; \
-	default: __get_user_bad(); break; \
+	switch (sizeof(*(ptr))) {		\
+	case 1:					\
+		__get_user_x(1, __ret_gu, __val_gu, ptr);	\
+		break;				\
+	case 2:					\
+		__get_user_x(2, __ret_gu, __val_gu, ptr);	\
+		break;				\
+	case 4:					\
+		__get_user_x(4, __ret_gu, __val_gu, ptr);	\
+		break;				\
+	case 8:					\
+		__get_user_x(8, __ret_gu, __val_gu, ptr);	\
+		break;				\
+	default:				\
+		__get_user_bad();		\
+		break;				\
 	} \
 	(x) = (__force typeof(*(ptr)))__val_gu;	\
 	__ret_gu; \
@@ -112,55 +128,73 @@ extern void __put_user_4(void);
 extern void __put_user_8(void);
 extern void __put_user_bad(void);
 
-#define __put_user_x(size,ret,x,ptr) \
+#define __put_user_x(size, ret, x, ptr)			\
 	asm volatile("call __put_user_" #size \
 		:"=a" (ret) \
 		:"c" (ptr),"d" (x) \
 		:"r8")
 
-#define put_user(x,ptr) \
-	__put_user_check((__typeof__(*(ptr)))(x),(ptr),sizeof(*(ptr)))
+#define put_user(x, ptr) \
+	__put_user_check((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
 
-#define __get_user(x,ptr) \
-	__get_user_nocheck((x),(ptr),sizeof(*(ptr)))
-#define __put_user(x,ptr) \
-	__put_user_nocheck((__typeof__(*(ptr)))(x),(ptr),sizeof(*(ptr)))
+#define __get_user(x, ptr) \
+	__get_user_nocheck((x), (ptr), sizeof(*(ptr)))
+#define __put_user(x, ptr) \
+	__put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
 
 #define __get_user_unaligned __get_user
 #define __put_user_unaligned __put_user
 
-#define __put_user_nocheck(x,ptr,size) \
+#define __put_user_nocheck(x, ptr, size)		\
 ({ \
 	int __pu_err; \
-	__put_user_size((x),(ptr),(size),__pu_err); \
+	__put_user_size((x), (ptr), (size), __pu_err);	\
 	__pu_err; \
 })
 
 
-#define __put_user_check(x,ptr,size) \
+#define __put_user_check(x, ptr, size)			\
 ({ \
 	int __pu_err; \
 	typeof(*(ptr)) __user *__pu_addr = (ptr); \
 	switch (size) { \
-	case 1: __put_user_x(1,__pu_err,x,__pu_addr); break; \
-	case 2: __put_user_x(2,__pu_err,x,__pu_addr); break; \
-	case 4: __put_user_x(4,__pu_err,x,__pu_addr); break; \
-	case 8: __put_user_x(8,__pu_err,x,__pu_addr); break; \
-	default: __put_user_bad(); \
-	} \
-	__pu_err; \
+	case 1:						\
+		__put_user_x(1, __pu_err, x, __pu_addr);	\
+		break;					\
+	case 2:						\
+		__put_user_x(2, __pu_err, x, __pu_addr);	\
+		break;					\
+	case 4:						\
+		__put_user_x(4, __pu_err, x, __pu_addr);	\
+		break;					\
+	case 8:						\
+		__put_user_x(8, __pu_err, x, __pu_addr);	\
+		break;					\
+	default:					\
+		__put_user_bad();			\
+	} \
+	__pu_err; \
 })
 
-#define __put_user_size(x,ptr,size,retval) \
+#define __put_user_size(x, ptr, size, retval)		\
 do { \
 	retval = 0; \
 	__chk_user_ptr(ptr); \
 	switch (size) { \
-	case 1: __put_user_asm(x,ptr,retval,"b","b","iq",-EFAULT); break;\
-	case 2: __put_user_asm(x,ptr,retval,"w","w","ir",-EFAULT); break;\
-	case 4: __put_user_asm(x,ptr,retval,"l","k","ir",-EFAULT); break;\
-	case 8: __put_user_asm(x,ptr,retval,"q","","Zr",-EFAULT); break;\
-	default: __put_user_bad(); \
+	case 1:						\
+		__put_user_asm(x, ptr, retval, "b", "b", "iq", -EFAULT);\
+		break;					\
+	case 2:						\
+		__put_user_asm(x, ptr, retval, "w", "w", "ir", -EFAULT);\
+		break;					\
+	case 4:						\
+		__put_user_asm(x, ptr, retval, "l", "k", "ir", -EFAULT);\
+		break;					\
+	case 8:						\
+		__put_user_asm(x, ptr, retval, "q", "", "Zr", -EFAULT); \
+		break;					\
+	default:					\
+		__put_user_bad();			\
 	} \
 } while (0)
 
@@ -174,23 +208,22 @@ struct __large_struct { unsigned long buf[100]; };
  * aliasing issues.
  */
 #define __put_user_asm(x, addr, err, itype, rtype, ltype, errno)	\
-	asm volatile( \
-	"1:	mov"itype" %"rtype"1,%2\n" \
-	"2:\n" \
-	".section .fixup,\"ax\"\n" \
-	"3:	mov %3,%0\n" \
-	"	jmp 2b\n" \
-	".previous\n" \
-	_ASM_EXTABLE(1b,3b) \
-	: "=r"(err) \
-	: ltype (x), "m"(__m(addr)), "i"(errno), "0"(err))
+	asm volatile("1:	mov"itype" %"rtype"1,%2\n"		\
+		     "2:\n"						\
+		     ".section .fixup, \"ax\"\n"			\
+		     "3:	mov %3,%0\n"				\
+		     "	jmp 2b\n"					\
+		     ".previous\n"					\
+		     _ASM_EXTABLE(1b, 3b)				\
+		     : "=r"(err)					\
+		     : ltype (x), "m" (__m(addr)), "i" (errno), "0" (err))
 
 
-#define __get_user_nocheck(x,ptr,size) \
+#define __get_user_nocheck(x, ptr, size)		\
 ({ \
 	int __gu_err; \
 	unsigned long __gu_val; \
-	__get_user_size(__gu_val,(ptr),(size),__gu_err); \
+	__get_user_size(__gu_val, (ptr), (size), __gu_err);	\
 	(x) = (__force typeof(*(ptr)))__gu_val; \
 	__gu_err; \
 })
@@ -201,31 +234,39 @@ extern int __get_user_4(void);
 extern int __get_user_8(void);
 extern int __get_user_bad(void);
 
-#define __get_user_size(x,ptr,size,retval) \
+#define __get_user_size(x, ptr, size, retval)		\
 do { \
 	retval = 0; \
 	__chk_user_ptr(ptr); \
 	switch (size) { \
-	case 1: __get_user_asm(x,ptr,retval,"b","b","=q",-EFAULT); break;\
-	case 2: __get_user_asm(x,ptr,retval,"w","w","=r",-EFAULT); break;\
-	case 4: __get_user_asm(x,ptr,retval,"l","k","=r",-EFAULT); break;\
-	case 8: __get_user_asm(x,ptr,retval,"q","","=r",-EFAULT); break;\
-	default: (x) = __get_user_bad(); \
+	case 1:						\
+		__get_user_asm(x, ptr, retval, "b", "b", "=q", -EFAULT);\
+		break;					\
+	case 2:						\
+		__get_user_asm(x, ptr, retval, "w", "w", "=r", -EFAULT);\
+		break;					\
+	case 4:						\
+		__get_user_asm(x, ptr, retval, "l", "k", "=r", -EFAULT);\
+		break;					\
+	case 8:						\
+		__get_user_asm(x, ptr, retval, "q", "", "=r", -EFAULT); \
+		break;					\
+	default:					\
+		(x) = __get_user_bad();			\
 	} \
 } while (0)
 
 #define __get_user_asm(x, addr, err, itype, rtype, ltype, errno)	\
-	asm volatile( \
-	"1:	mov"itype" %2,%"rtype"1\n" \
-	"2:\n" \
-	".section .fixup,\"ax\"\n" \
-	"3:	mov %3,%0\n" \
-	"	xor"itype" %"rtype"1,%"rtype"1\n" \
-	"	jmp 2b\n" \
-	".previous\n" \
-	_ASM_EXTABLE(1b,3b) \
-	: "=r"(err), ltype (x) \
-	: "m"(__m(addr)), "i"(errno), "0"(err))
+	asm volatile("1:	mov"itype" %2,%"rtype"1\n"		\
+		     "2:\n"						\
+		     ".section .fixup, \"ax\"\n"			\
+		     "3:	mov %3,%0\n"				\
+		     "	xor"itype" %"rtype"1,%"rtype"1\n"		\
+		     "	jmp 2b\n"					\
+		     ".previous\n"					\
+		     _ASM_EXTABLE(1b, 3b)				\
+		     : "=r" (err), ltype (x)				\
+		     : "m" (__m(addr)), "i"(errno), "0"(err))
 
 /*
  * Copy To/From Userspace
@@ -244,110 +285,142 @@ copy_in_user(void __user *to, const void __user *from, unsigned len);
 
 static __always_inline __must_check
 int __copy_from_user(void *dst, const void __user *src, unsigned size)
 {
 	int ret = 0;
 	if (!__builtin_constant_p(size))
-		return copy_user_generic(dst,(__force void *)src,size);
+		return copy_user_generic(dst, (__force void *)src, size);
 	switch (size) {
-	case 1:__get_user_asm(*(u8*)dst,(u8 __user *)src,ret,"b","b","=q",1);
+	case 1:__get_user_asm(*(u8 *)dst, (u8 __user *)src,
+			      ret, "b", "b", "=q", 1);
 		return ret;
-	case 2:__get_user_asm(*(u16*)dst,(u16 __user *)src,ret,"w","w","=r",2);
+	case 2:__get_user_asm(*(u16 *)dst, (u16 __user *)src,
+			      ret, "w", "w", "=r", 2);
 		return ret;
-	case 4:__get_user_asm(*(u32*)dst,(u32 __user *)src,ret,"l","k","=r",4);
+	case 4:__get_user_asm(*(u32 *)dst, (u32 __user *)src,
+			      ret, "l", "k", "=r", 4);
+		return ret;
+	case 8:__get_user_asm(*(u64 *)dst, (u64 __user *)src,
+			      ret, "q", "", "=r", 8);
 		return ret;
-	case 8:__get_user_asm(*(u64*)dst,(u64 __user *)src,ret,"q","","=r",8);
-		return ret;
 	case 10:
-		__get_user_asm(*(u64*)dst,(u64 __user *)src,ret,"q","","=r",16);
-		if (unlikely(ret)) return ret;
-		__get_user_asm(*(u16*)(8+(char*)dst),(u16 __user *)(8+(char __user *)src),ret,"w","w","=r",2);
-		return ret;
+		__get_user_asm(*(u64 *)dst, (u64 __user *)src,
+			       ret, "q", "", "=r", 16);
+		if (unlikely(ret))
+			return ret;
+		__get_user_asm(*(u16 *)(8 + (char *)dst),
+			       (u16 __user *)(8 + (char __user *)src),
+			       ret, "w", "w", "=r", 2);
+		return ret;
 	case 16:
-		__get_user_asm(*(u64*)dst,(u64 __user *)src,ret,"q","","=r",16);
-		if (unlikely(ret)) return ret;
-		__get_user_asm(*(u64*)(8+(char*)dst),(u64 __user *)(8+(char __user *)src),ret,"q","","=r",8);
-		return ret;
+		__get_user_asm(*(u64 *)dst, (u64 __user *)src,
+			       ret, "q", "", "=r", 16);
+		if (unlikely(ret))
+			return ret;
+		__get_user_asm(*(u64 *)(8 + (char *)dst),
+			       (u64 __user *)(8 + (char __user *)src),
+			       ret, "q", "", "=r", 8);
+		return ret;
 	default:
-		return copy_user_generic(dst,(__force void *)src,size);
+		return copy_user_generic(dst, (__force void *)src, size);
 	}
 }
 
 static __always_inline __must_check
 int __copy_to_user(void __user *dst, const void *src, unsigned size)
 {
 	int ret = 0;
 	if (!__builtin_constant_p(size))
-		return copy_user_generic((__force void *)dst,src,size);
+		return copy_user_generic((__force void *)dst, src, size);
 	switch (size) {
-	case 1:__put_user_asm(*(u8*)src,(u8 __user *)dst,ret,"b","b","iq",1);
+	case 1:__put_user_asm(*(u8 *)src, (u8 __user *)dst,
+			      ret, "b", "b", "iq", 1);
 		return ret;
-	case 2:__put_user_asm(*(u16*)src,(u16 __user *)dst,ret,"w","w","ir",2);
+	case 2:__put_user_asm(*(u16 *)src, (u16 __user *)dst,
+			      ret, "w", "w", "ir", 2);
 		return ret;
-	case 4:__put_user_asm(*(u32*)src,(u32 __user *)dst,ret,"l","k","ir",4);
+	case 4:__put_user_asm(*(u32 *)src, (u32 __user *)dst,
+			      ret, "l", "k", "ir", 4);
+		return ret;
+	case 8:__put_user_asm(*(u64 *)src, (u64 __user *)dst,
+			      ret, "q", "", "ir", 8);
 		return ret;
-	case 8:__put_user_asm(*(u64*)src,(u64 __user *)dst,ret,"q","","ir",8);
-		return ret;
 	case 10:
-		__put_user_asm(*(u64*)src,(u64 __user *)dst,ret,"q","","ir",10);
-		if (unlikely(ret)) return ret;
+		__put_user_asm(*(u64 *)src, (u64 __user *)dst,
+			       ret, "q", "", "ir", 10);
+		if (unlikely(ret))
+			return ret;
 		asm("":::"memory");
-		__put_user_asm(4[(u16*)src],4+(u16 __user *)dst,ret,"w","w","ir",2);
-		return ret;
+		__put_user_asm(4[(u16 *)src], 4 + (u16 __user *)dst,
+			       ret, "w", "w", "ir", 2);
+		return ret;
 	case 16:
-		__put_user_asm(*(u64*)src,(u64 __user *)dst,ret,"q","","ir",16);
-		if (unlikely(ret)) return ret;
+		__put_user_asm(*(u64 *)src, (u64 __user *)dst,
+			       ret, "q", "", "ir", 16);
+		if (unlikely(ret))
+			return ret;
 		asm("":::"memory");
-		__put_user_asm(1[(u64*)src],1+(u64 __user *)dst,ret,"q","","ir",8);
-		return ret;
+		__put_user_asm(1[(u64 *)src], 1 + (u64 __user *)dst,
+			       ret, "q", "", "ir", 8);
+		return ret;
 	default:
-		return copy_user_generic((__force void *)dst,src,size);
+		return copy_user_generic((__force void *)dst, src, size);
 	}
 }
 
 static __always_inline __must_check
 int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
 {
 	int ret = 0;
 	if (!__builtin_constant_p(size))
-		return copy_user_generic((__force void *)dst,(__force void *)src,size);
-	switch (size) {
-	case 1: {
+		return copy_user_generic((__force void *)dst,
+					 (__force void *)src, size);
+	switch (size) {
+	case 1: {
 		u8 tmp;
-		__get_user_asm(tmp,(u8 __user *)src,ret,"b","b","=q",1);
+		__get_user_asm(tmp, (u8 __user *)src,
+			       ret, "b", "b", "=q", 1);
 		if (likely(!ret))
-			__put_user_asm(tmp,(u8 __user *)dst,ret,"b","b","iq",1);
+			__put_user_asm(tmp, (u8 __user *)dst,
+				       ret, "b", "b", "iq", 1);
 		return ret;
 	}
 	case 2: {
 		u16 tmp;
-		__get_user_asm(tmp,(u16 __user *)src,ret,"w","w","=r",2);
+		__get_user_asm(tmp, (u16 __user *)src,
+			       ret, "w", "w", "=r", 2);
 		if (likely(!ret))
-			__put_user_asm(tmp,(u16 __user *)dst,ret,"w","w","ir",2);
+			__put_user_asm(tmp, (u16 __user *)dst,
+				       ret, "w", "w", "ir", 2);
 		return ret;
 	}
 
 	case 4: {
 		u32 tmp;
-		__get_user_asm(tmp,(u32 __user *)src,ret,"l","k","=r",4);
+		__get_user_asm(tmp, (u32 __user *)src,
+			       ret, "l", "k", "=r", 4);
 		if (likely(!ret))
-			__put_user_asm(tmp,(u32 __user *)dst,ret,"l","k","ir",4);
+			__put_user_asm(tmp, (u32 __user *)dst,
+				       ret, "l", "k", "ir", 4);
 		return ret;
 	}
 	case 8: {
 		u64 tmp;
-		__get_user_asm(tmp,(u64 __user *)src,ret,"q","","=r",8);
+		__get_user_asm(tmp, (u64 __user *)src,
+			       ret, "q", "", "=r", 8);
 		if (likely(!ret))
-			__put_user_asm(tmp,(u64 __user *)dst,ret,"q","","ir",8);
+			__put_user_asm(tmp, (u64 __user *)dst,
+				       ret, "q", "", "ir", 8);
 		return ret;
 	}
 	default:
-		return copy_user_generic((__force void *)dst,(__force void *)src,size);
+		return copy_user_generic((__force void *)dst,
+					 (__force void *)src, size);
 	}
 }
 
 __must_check long
 strncpy_from_user(char *dst, const char __user *src, long count);
 __must_check long
 __strncpy_from_user(char *dst, const char __user *src, long count);
 __must_check long strnlen_user(const char __user *str, long n);
 __must_check long __strnlen_user(const char __user *str, long n);
@@ -355,7 +428,8 @@ __must_check long strlen_user(const char __user *str);
 __must_check unsigned long clear_user(void __user *mem, unsigned long len);
 __must_check unsigned long __clear_user(void __user *mem, unsigned long len);
 
-__must_check long __copy_from_user_inatomic(void *dst, const void __user *src, unsigned size);
+__must_check long __copy_from_user_inatomic(void *dst, const void __user *src,
+					    unsigned size);
 
 static __must_check __always_inline int
 __copy_to_user_inatomic(void __user *dst, const void *src, unsigned size)
@@ -364,15 +438,19 @@ __copy_to_user_inatomic(void __user *dst, const void *src, unsigned size)
 }
 
 #define ARCH_HAS_NOCACHE_UACCESS 1
-extern long __copy_user_nocache(void *dst, const void __user *src, unsigned size, int zerorest);
+extern long __copy_user_nocache(void *dst, const void __user *src,
+				unsigned size, int zerorest);
 
-static inline int __copy_from_user_nocache(void *dst, const void __user *src, unsigned size)
+static inline int __copy_from_user_nocache(void *dst, const void __user *src,
+					   unsigned size)
 {
 	might_sleep();
 	return __copy_user_nocache(dst, src, size, 1);
 }
 
-static inline int __copy_from_user_inatomic_nocache(void *dst, const void __user *src, unsigned size)
+static inline int __copy_from_user_inatomic_nocache(void *dst,
+						    const void __user *src,
+						    unsigned size)
 {
 	return __copy_user_nocache(dst, src, size, 0);
 }
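For context, the macros touched above are the standard x86-64 user-copy primitives; a minimal caller looks like this (a sketch assuming the usual kernel environment; echo_u32() is a hypothetical helper, not part of this file):

#include <linux/uaccess.h>

/* Read a u32 from user memory, bump it, and store it back.  get_user()
 * dispatches on sizeof(*uptr) -- here via __get_user_x(4, ...) -- and
 * both macros return 0 on success or -EFAULT on a faulting access. */
static int echo_u32(u32 __user *uptr)
{
	u32 val;

	if (get_user(val, uptr))
		return -EFAULT;
	return put_user(val + 1, uptr);
}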