author    Al Viro <viro@zeniv.linux.org.uk>    2017-03-25 18:47:28 -0400
committer Al Viro <viro@zeniv.linux.org.uk>    2017-03-28 18:24:05 -0400
commit    3f763453e6f27d82fa0ac58f8e1ac4094c1fb1f8 (patch)
tree      66b4a1ef1f7d7b9573cf8ff53da4b48690a588e0
parent    122b05ddf506e637336dcf64b5a129825f7bf6d4 (diff)
kill __copy_from_user_nocache()
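
The only remaining callers are the two iov_iter primitives converted at
the end of this patch; switch them to __copy_from_user_inatomic_nocache()
and remove the now-unused definitions: the x86-32 inline (together with
its __copy_from_user_ll_nocache() backend and the
__copy_user_zeroing_intel_nocache() asm helper behind it), the x86-64
wrapper around __copy_user_nocache(), and the generic fallback in
<linux/uaccess.h>.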
Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
-rw-r--r--  arch/x86/include/asm/uaccess_32.h    30
-rw-r--r--  arch/x86/include/asm/uaccess_64.h     8
-rw-r--r--  arch/x86/lib/usercopy_32.c          118
-rw-r--r--  include/linux/uaccess.h               6
-rw-r--r--  lib/iov_iter.c                        4
5 files changed, 2 insertions, 164 deletions
diff --git a/arch/x86/include/asm/uaccess_32.h b/arch/x86/include/asm/uaccess_32.h
index 5268ecceea96..19e6c050c438 100644
--- a/arch/x86/include/asm/uaccess_32.h
+++ b/arch/x86/include/asm/uaccess_32.h
@@ -14,8 +14,6 @@ unsigned long __must_check __copy_from_user_ll
 	(void *to, const void __user *from, unsigned long n);
 unsigned long __must_check __copy_from_user_ll_nozero
 	(void *to, const void __user *from, unsigned long n);
-unsigned long __must_check __copy_from_user_ll_nocache
-	(void *to, const void __user *from, unsigned long n);
 unsigned long __must_check __copy_from_user_ll_nocache_nozero
 	(void *to, const void __user *from, unsigned long n);
 
@@ -119,34 +117,6 @@ __copy_from_user(void *to, const void __user *from, unsigned long n)
 	return __copy_from_user_ll(to, from, n);
 }
 
-static __always_inline unsigned long __copy_from_user_nocache(void *to,
-				const void __user *from, unsigned long n)
-{
-	might_fault();
-	if (__builtin_constant_p(n)) {
-		unsigned long ret;
-
-		switch (n) {
-		case 1:
-			__uaccess_begin();
-			__get_user_size(*(u8 *)to, from, 1, ret, 1);
-			__uaccess_end();
-			return ret;
-		case 2:
-			__uaccess_begin();
-			__get_user_size(*(u16 *)to, from, 2, ret, 2);
-			__uaccess_end();
-			return ret;
-		case 4:
-			__uaccess_begin();
-			__get_user_size(*(u32 *)to, from, 4, ret, 4);
-			__uaccess_end();
-			return ret;
-		}
-	}
-	return __copy_from_user_ll_nocache(to, from, n);
-}
-
 static __always_inline unsigned long
 __copy_from_user_inatomic_nocache(void *to, const void __user *from,
 				  unsigned long n)
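
The deleted 32-bit inline above also shows a standard uaccess trick: when
the compiler can prove the size is a constant 1, 2 or 4, the copy collapses
to a single fixed-size access, and only non-constant sizes reach the
generic routine. Note that those tiny cases went through the ordinary
cached __get_user_size(), so the nocache hint never applied to small
constant copies in the first place. A generic userspace sketch of the
dispatch pattern (copy_small_fast() is a hypothetical name, and plain
memcpy() stands in for the kernel accessors):

#include <string.h>

static inline unsigned long copy_small_fast(void *to, const void *from,
					    unsigned long n)
{
	/* constant-size fast path: the compiler folds the switch away */
	if (__builtin_constant_p(n)) {
		switch (n) {
		case 1: memcpy(to, from, 1); return 0;
		case 2: memcpy(to, from, 2); return 0;
		case 4: memcpy(to, from, 4); return 0;
		}
	}
	memcpy(to, from, n);	/* generic fallback for other sizes */
	return 0;
}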
diff --git a/arch/x86/include/asm/uaccess_64.h b/arch/x86/include/asm/uaccess_64.h
index 142f0f1230be..242936b0cb4b 100644
--- a/arch/x86/include/asm/uaccess_64.h
+++ b/arch/x86/include/asm/uaccess_64.h
@@ -261,14 +261,6 @@ extern long __copy_user_nocache(void *dst, const void __user *src,
 				unsigned size, int zerorest);
 
 static inline int
-__copy_from_user_nocache(void *dst, const void __user *src, unsigned size)
-{
-	might_fault();
-	kasan_check_write(dst, size);
-	return __copy_user_nocache(dst, src, size, 1);
-}
-
-static inline int
 __copy_from_user_inatomic_nocache(void *dst, const void __user *src,
 				  unsigned size)
 {
diff --git a/arch/x86/lib/usercopy_32.c b/arch/x86/lib/usercopy_32.c
index 1f65ff6540f0..02aa7aa8b9f3 100644
--- a/arch/x86/lib/usercopy_32.c
+++ b/arch/x86/lib/usercopy_32.c
@@ -293,105 +293,6 @@ __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size)
 	return size;
 }
 
-/*
- * Non Temporal Hint version of __copy_user_zeroing_intel. It is cache aware.
- * hyoshiok@miraclelinux.com
- */
-
-static unsigned long __copy_user_zeroing_intel_nocache(void *to,
-				const void __user *from, unsigned long size)
-{
-	int d0, d1;
-
-	__asm__ __volatile__(
-	       "        .align 2,0x90\n"
-	       "0:      movl 32(%4), %%eax\n"
-	       "        cmpl $67, %0\n"
-	       "        jbe 2f\n"
-	       "1:      movl 64(%4), %%eax\n"
-	       "        .align 2,0x90\n"
-	       "2:      movl 0(%4), %%eax\n"
-	       "21:     movl 4(%4), %%edx\n"
-	       "        movnti %%eax, 0(%3)\n"
-	       "        movnti %%edx, 4(%3)\n"
-	       "3:      movl 8(%4), %%eax\n"
-	       "31:     movl 12(%4),%%edx\n"
-	       "        movnti %%eax, 8(%3)\n"
-	       "        movnti %%edx, 12(%3)\n"
-	       "4:      movl 16(%4), %%eax\n"
-	       "41:     movl 20(%4), %%edx\n"
-	       "        movnti %%eax, 16(%3)\n"
-	       "        movnti %%edx, 20(%3)\n"
-	       "10:     movl 24(%4), %%eax\n"
-	       "51:     movl 28(%4), %%edx\n"
-	       "        movnti %%eax, 24(%3)\n"
-	       "        movnti %%edx, 28(%3)\n"
-	       "11:     movl 32(%4), %%eax\n"
-	       "61:     movl 36(%4), %%edx\n"
-	       "        movnti %%eax, 32(%3)\n"
-	       "        movnti %%edx, 36(%3)\n"
-	       "12:     movl 40(%4), %%eax\n"
-	       "71:     movl 44(%4), %%edx\n"
-	       "        movnti %%eax, 40(%3)\n"
-	       "        movnti %%edx, 44(%3)\n"
-	       "13:     movl 48(%4), %%eax\n"
-	       "81:     movl 52(%4), %%edx\n"
-	       "        movnti %%eax, 48(%3)\n"
-	       "        movnti %%edx, 52(%3)\n"
-	       "14:     movl 56(%4), %%eax\n"
-	       "91:     movl 60(%4), %%edx\n"
-	       "        movnti %%eax, 56(%3)\n"
-	       "        movnti %%edx, 60(%3)\n"
-	       "        addl $-64, %0\n"
-	       "        addl $64, %4\n"
-	       "        addl $64, %3\n"
-	       "        cmpl $63, %0\n"
-	       "        ja  0b\n"
-	       "        sfence \n"
-	       "5:      movl  %0, %%eax\n"
-	       "        shrl $2, %0\n"
-	       "        andl $3, %%eax\n"
-	       "        cld\n"
-	       "6:      rep; movsl\n"
-	       "        movl %%eax,%0\n"
-	       "7:      rep; movsb\n"
-	       "8:\n"
-	       ".section .fixup,\"ax\"\n"
-	       "9:      lea 0(%%eax,%0,4),%0\n"
-	       "16:     pushl %0\n"
-	       "        pushl %%eax\n"
-	       "        xorl %%eax,%%eax\n"
-	       "        rep; stosb\n"
-	       "        popl %%eax\n"
-	       "        popl %0\n"
-	       "        jmp 8b\n"
-	       ".previous\n"
-	       _ASM_EXTABLE(0b,16b)
-	       _ASM_EXTABLE(1b,16b)
-	       _ASM_EXTABLE(2b,16b)
-	       _ASM_EXTABLE(21b,16b)
-	       _ASM_EXTABLE(3b,16b)
-	       _ASM_EXTABLE(31b,16b)
-	       _ASM_EXTABLE(4b,16b)
-	       _ASM_EXTABLE(41b,16b)
-	       _ASM_EXTABLE(10b,16b)
-	       _ASM_EXTABLE(51b,16b)
-	       _ASM_EXTABLE(11b,16b)
-	       _ASM_EXTABLE(61b,16b)
-	       _ASM_EXTABLE(12b,16b)
-	       _ASM_EXTABLE(71b,16b)
-	       _ASM_EXTABLE(13b,16b)
-	       _ASM_EXTABLE(81b,16b)
-	       _ASM_EXTABLE(14b,16b)
-	       _ASM_EXTABLE(91b,16b)
-	       _ASM_EXTABLE(6b,9b)
-	       _ASM_EXTABLE(7b,16b)
-	       : "=&c"(size), "=&D" (d0), "=&S" (d1)
-	       : "1"(to), "2"(from), "0"(size)
-	       : "eax", "edx", "memory");
-	return size;
-}
-
 static unsigned long __copy_user_intel_nocache(void *to,
 				const void __user *from, unsigned long size)
 {
@@ -490,8 +391,6 @@ unsigned long __copy_user_zeroing_intel(void *to, const void __user *from,
 					unsigned long size);
 unsigned long __copy_user_intel(void __user *to, const void *from,
 					unsigned long size);
-unsigned long __copy_user_zeroing_intel_nocache(void *to,
-				const void __user *from, unsigned long size);
 #endif /* CONFIG_X86_INTEL_USERCOPY */
 
 /* Generic arbitrary sized copy. */
@@ -607,23 +506,6 @@ unsigned long __copy_from_user_ll_nozero(void *to, const void __user *from,
 }
 EXPORT_SYMBOL(__copy_from_user_ll_nozero);
 
-unsigned long __copy_from_user_ll_nocache(void *to, const void __user *from,
-					unsigned long n)
-{
-	stac();
-#ifdef CONFIG_X86_INTEL_USERCOPY
-	if (n > 64 && static_cpu_has(X86_FEATURE_XMM2))
-		n = __copy_user_zeroing_intel_nocache(to, from, n);
-	else
-		__copy_user_zeroing(to, from, n);
-#else
-	__copy_user_zeroing(to, from, n);
-#endif
-	clac();
-	return n;
-}
-EXPORT_SYMBOL(__copy_from_user_ll_nocache);
-
 unsigned long __copy_from_user_ll_nocache_nozero(void *to, const void __user *from,
 					unsigned long n)
 {
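
For reference, the technique behind the deleted
__copy_user_zeroing_intel_nocache() is a plain non-temporal copy: movnti
writes the bulk of the data past the cache, sfence orders the streaming
stores, and the remainder is finished with an ordinary cached copy (the
asm's rep;movsl / rep;movsb epilogue). A minimal userspace sketch of the
same idea, using the SSE2 intrinsic that maps to movnti (illustrative
only, not kernel code; copy_nocache() is a made-up name and dst is
assumed 4-byte aligned):

#include <emmintrin.h>	/* SSE2: _mm_stream_si32 (movnti), _mm_sfence */
#include <stddef.h>
#include <string.h>

static void copy_nocache(void *dst, const void *src, size_t n)
{
	char *d = dst;
	const char *s = src;

	while (n >= 4) {
		int v;

		memcpy(&v, s, sizeof(v));	/* cached load, like the asm's movl */
		_mm_stream_si32((int *)d, v);	/* movnti: store bypassing the cache */
		d += 4;
		s += 4;
		n -= 4;
	}
	_mm_sfence();		/* order the streaming stores before returning */
	memcpy(d, s, n);	/* cached tail copy */
}

Streaming stores only pay off for copies too large to be worth caching,
which is why the deleted __copy_from_user_ll_nocache() above took this
path only for n > 64 on X86_FEATURE_XMM2 CPUs.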
diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h
index 5f76bc995d96..7fc2104b88bc 100644
--- a/include/linux/uaccess.h
+++ b/include/linux/uaccess.h
@@ -261,12 +261,6 @@ static inline unsigned long __copy_from_user_inatomic_nocache(void *to,
 	return __copy_from_user_inatomic(to, from, n);
 }
 
-static inline unsigned long __copy_from_user_nocache(void *to,
-				const void __user *from, unsigned long n)
-{
-	return __copy_from_user(to, from, n);
-}
-
 #endif /* ARCH_HAS_NOCACHE_UACCESS */
 
 /*
diff --git a/lib/iov_iter.c b/lib/iov_iter.c
index 97db876c6862..672c32f9f960 100644
--- a/lib/iov_iter.c
+++ b/lib/iov_iter.c
@@ -604,7 +604,7 @@ size_t copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i)
 		return 0;
 	}
 	iterate_and_advance(i, bytes, v,
-		__copy_from_user_nocache((to += v.iov_len) - v.iov_len,
+		__copy_from_user_inatomic_nocache((to += v.iov_len) - v.iov_len,
 				 v.iov_base, v.iov_len),
 		memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
 				 v.bv_offset, v.bv_len),
@@ -625,7 +625,7 @@ bool copy_from_iter_full_nocache(void *addr, size_t bytes, struct iov_iter *i)
 	if (unlikely(i->count < bytes))
 		return false;
 	iterate_all_kinds(i, bytes, v, ({
-		if (__copy_from_user_nocache((to += v.iov_len) - v.iov_len,
+		if (__copy_from_user_inatomic_nocache((to += v.iov_len) - v.iov_len,
 					     v.iov_base, v.iov_len))
 			return false;
 		0;}),
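
With the wrapper gone, both iov_iter paths call the _inatomic_ variant
directly. Judging from the x86-64 hunk above, the semantic delta is that
the deleted __copy_from_user_nocache() performed a might_fault() check
and asked __copy_user_nocache() to zero the destination tail on a fault
(zerorest=1), while the inatomic variant omits the sleep check and,
presumably, the tail zeroing. Its body lies outside these hunks, so the
following is a hedged reconstruction rather than a quote:

static inline int
__copy_from_user_inatomic_nocache(void *dst, const void __user *src,
				  unsigned size)
{
	/* reconstructed for illustration -- not part of this diff */
	kasan_check_write(dst, size);
	return __copy_user_nocache(dst, src, size, 0);	/* no tail zeroing */
}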