Diffstat (limited to 'arch/x86/include/asm/uaccess.h')
 arch/x86/include/asm/uaccess.h | 69 +++++++++------------------------------------------------------------
 1 file changed, 9 insertions(+), 60 deletions(-)
diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
index a0ae610b9280..c3f291195294 100644
--- a/arch/x86/include/asm/uaccess.h
+++ b/arch/x86/include/asm/uaccess.h
@@ -697,43 +697,14 @@ unsigned long __must_check _copy_from_user(void *to, const void __user *from,
 unsigned long __must_check _copy_to_user(void __user *to, const void *from,
					 unsigned n);
 
-#ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
-# define copy_user_diag __compiletime_error
-#else
-# define copy_user_diag __compiletime_warning
-#endif
-
-extern void copy_user_diag("copy_from_user() buffer size is too small")
-copy_from_user_overflow(void);
-extern void copy_user_diag("copy_to_user() buffer size is too small")
-copy_to_user_overflow(void) __asm__("copy_from_user_overflow");
-
-#undef copy_user_diag
-
-#ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
-
-extern void
-__compiletime_warning("copy_from_user() buffer size is not provably correct")
-__copy_from_user_overflow(void) __asm__("copy_from_user_overflow");
-#define __copy_from_user_overflow(size, count) __copy_from_user_overflow()
-
-extern void
-__compiletime_warning("copy_to_user() buffer size is not provably correct")
-__copy_to_user_overflow(void) __asm__("copy_from_user_overflow");
-#define __copy_to_user_overflow(size, count) __copy_to_user_overflow()
-
-#else
+extern void __compiletime_error("usercopy buffer size is too small")
+__bad_copy_user(void);
 
-static inline void
-__copy_from_user_overflow(int size, unsigned long count)
+static inline void copy_user_overflow(int size, unsigned long count)
 {
 	WARN(1, "Buffer overflow detected (%d < %lu)!\n", size, count);
 }
 
-#define __copy_to_user_overflow __copy_from_user_overflow
-
-#endif
-
 static inline unsigned long __must_check
 copy_from_user(void *to, const void __user *from, unsigned long n)
 {
@@ -743,31 +714,13 @@ copy_from_user(void *to, const void __user *from, unsigned long n)
 
 	kasan_check_write(to, n);
 
-	/*
-	 * While we would like to have the compiler do the checking for us
-	 * even in the non-constant size case, any false positives there are
-	 * a problem (especially when DEBUG_STRICT_USER_COPY_CHECKS, but even
-	 * without - the [hopefully] dangerous looking nature of the warning
-	 * would make people go look at the respecitive call sites over and
-	 * over again just to find that there's no problem).
-	 *
-	 * And there are cases where it's just not realistic for the compiler
-	 * to prove the count to be in range. For example when multiple call
-	 * sites of a helper function - perhaps in different source files -
-	 * all doing proper range checking, yet the helper function not doing
-	 * so again.
-	 *
-	 * Therefore limit the compile time checking to the constant size
-	 * case, and do only runtime checking for non-constant sizes.
-	 */
-
 	if (likely(sz < 0 || sz >= n)) {
 		check_object_size(to, n, false);
 		n = _copy_from_user(to, from, n);
-	} else if (__builtin_constant_p(n))
-		copy_from_user_overflow();
+	} else if (!__builtin_constant_p(n))
+		copy_user_overflow(sz, n);
 	else
-		__copy_from_user_overflow(sz, n);
+		__bad_copy_user();
 
 	return n;
 }
@@ -781,21 +734,17 @@ copy_to_user(void __user *to, const void *from, unsigned long n)
 
 	might_fault();
 
-	/* See the comment in copy_from_user() above. */
 	if (likely(sz < 0 || sz >= n)) {
 		check_object_size(from, n, true);
 		n = _copy_to_user(to, from, n);
-	} else if (__builtin_constant_p(n))
-		copy_to_user_overflow();
+	} else if (!__builtin_constant_p(n))
+		copy_user_overflow(sz, n);
 	else
-		__copy_to_user_overflow(sz, n);
+		__bad_copy_user();
 
 	return n;
 }
 
-#undef __copy_from_user_overflow
-#undef __copy_to_user_overflow
-
 /*
  * We rely on the nested NMI work to allow atomic faults from the NMI path; the
  * nested NMI paths are careful to preserve CR2.
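
For readers unfamiliar with the trick the new declaration relies on: __compiletime_error() expands (on gcc) to the "error" function attribute, and __bad_copy_user() is deliberately never defined, so any call to it that survives optimization aborts the build with the attached message. A minimal userspace sketch of the same pattern follows; struct pkt and check_copy() are made-up illustrations, not kernel code, and it should be built with gcc -O2 so provably dead branches are folded away:

/* Stand-in for the kernel's __compiletime_error() helper. */
#define __compiletime_error(msg) __attribute__((error(msg)))

/* Declared but never defined: any surviving call fails the build. */
extern void __compiletime_error("usercopy buffer size is too small")
__bad_copy_user(void);

struct pkt { char buf[16]; };	/* hypothetical destination object */

static inline unsigned long check_copy(struct pkt *p, unsigned long n)
{
	/* A constant n that is provably too large leaves a live call
	 * to __bad_copy_user() behind, and gcc rejects the build. */
	if (__builtin_constant_p(n) && n > sizeof(p->buf))
		__bad_copy_user();
	return n;
}

int main(void)
{
	struct pkt p;

	check_copy(&p, 8);	/* dead branch folds away: builds fine */
	/* check_copy(&p, 32); -- would abort the build with the
	 * "usercopy buffer size is too small" error */
	return 0;
}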
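The three-way dispatch that copy_from_user() and copy_to_user() now share can likewise be sketched in plain C. This is a simplified stand-in, not the kernel implementation: __builtin_object_size() plays the role of the sz value the kernel computes, memcpy() stands in for the actual user copy, fprintf() for WARN(), and checked_copy() is a made-up name. Sizes the compiler cannot determine pass straight through; a provable overflow warns at runtime when n is not a compile-time constant, and breaks the build when it is:

#include <stdio.h>
#include <string.h>

#define __compiletime_error(msg) __attribute__((error(msg)))

extern void __compiletime_error("usercopy buffer size is too small")
__bad_copy_user(void);

/* Runtime stand-in for the kernel's WARN()-based copy_user_overflow(). */
static inline void copy_user_overflow(int size, unsigned long count)
{
	fprintf(stderr, "Buffer overflow detected (%d < %lu)!\n",
		size, count);
}

static inline unsigned long
checked_copy(void *to, const void *from, unsigned long n)
{
	long sz = __builtin_object_size(to, 0);	/* -1 when unknown */

	if (sz < 0 || (unsigned long)sz >= n)
		memcpy(to, from, n);	/* size unknown, or copy fits */
	else if (!__builtin_constant_p(n))
		copy_user_overflow(sz, n);	/* variable n: warn at runtime */
	else
		__bad_copy_user();	/* constant overflow: build error */
	return n;
}

int main(void)
{
	char dst[16];
	const char src[64] = "example";

	checked_copy(dst, src, 16);	/* constant and fits: plain copy */
	/* checked_copy(dst, src, sizeof(src)); -- 64 > 16 and constant:
	 * the build fails via __bad_copy_user() */
	return 0;
}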