author     James Hogan <james.hogan@imgtec.com>             2017-03-31 06:14:02 -0400
committer  Greg Kroah-Hartman <gregkh@linuxfoundation.org>  2017-04-12 06:41:13 -0400
commit     3dc0fe517a9fb44f6c45cbb787cc4bdf5e9a3d0f (patch)
tree       413d815492f89686b72b5a5628fa6aa9f8fd833c /arch/metag
parent     4a93ac814ddcc8d2841f224d9157ab241feab1ac (diff)
metag/usercopy: Zero rest of buffer from copy_from_user
commit 563ddc1076109f2b3f88e6d355eab7b6fd4662cb upstream.
Currently we try to zero the destination for a failed read from userland
in fixup code in the usercopy.c macros. The rest of the destination
buffer is then zeroed from __copy_user_zeroing(), which is used for both
copy_from_user() and __copy_from_user().
Unfortunately we fail to zero in the fixup code as D1Ar1 is set to 0
before the fixup code entry labels, and __copy_from_user() shouldn't even
be zeroing the rest of the buffer.
Move the zeroing out into copy_from_user() and rename
__copy_user_zeroing() to raw_copy_from_user() since it no longer does
any zeroing. This also conveniently matches the name needed for
RAW_COPY_USER support in a later patch.
Fixes: 373cd784d0fc ("metag: Memory handling")
Reported-by: Al Viro <viro@zeniv.linux.org.uk>
Signed-off-by: James Hogan <james.hogan@imgtec.com>
Cc: linux-metag@vger.kernel.org
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Diffstat (limited to 'arch/metag')
-rw-r--r--   arch/metag/include/asm/uaccess.h   15
-rw-r--r--   arch/metag/lib/usercopy.c          57
2 files changed, 26 insertions, 46 deletions
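
In effect, copy_from_user() now does the zeroing itself: it calls the raw copy routine, which returns the number of bytes it could not copy, and then clears exactly that uncopied tail. Below is a minimal sketch of that semantics in plain C (illustrative only; demo_copy_from_user and the raw_copy callback are made-up stand-ins, not kernel API). The uaccess.h hunk that follows implements the same pattern with raw_copy_from_user().

#include <string.h>

/*
 * Illustrative only: raw_copy() stands in for raw_copy_from_user() and, like
 * the kernel helper, returns the number of bytes it could NOT copy.  The
 * caller then zeroes just that uncopied tail, which is the behaviour the
 * patched copy_from_user() implements.
 */
static unsigned long demo_copy_from_user(void *to, const void *from,
                                         unsigned long n,
                                         unsigned long (*raw_copy)(void *,
                                                                   const void *,
                                                                   unsigned long))
{
        unsigned long res = raw_copy(to, from, n);      /* bytes left uncopied */

        if (res)                                        /* fault part-way through */
                memset((char *)to + (n - res), 0, res); /* zero only the tail */
        return res;                                     /* 0 on full success */
}
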
diff --git a/arch/metag/include/asm/uaccess.h b/arch/metag/include/asm/uaccess.h
index 273e61225c27..07238b39638c 100644
--- a/arch/metag/include/asm/uaccess.h
+++ b/arch/metag/include/asm/uaccess.h
@@ -197,20 +197,21 @@ extern long __must_check strnlen_user(const char __user *src, long count);
 
 #define strlen_user(str) strnlen_user(str, 32767)
 
-extern unsigned long __must_check __copy_user_zeroing(void *to,
-                                                       const void __user *from,
-                                                       unsigned long n);
+extern unsigned long raw_copy_from_user(void *to, const void __user *from,
+                                        unsigned long n);
 
 static inline unsigned long
 copy_from_user(void *to, const void __user *from, unsigned long n)
 {
+        unsigned long res = n;
         if (likely(access_ok(VERIFY_READ, from, n)))
-                return __copy_user_zeroing(to, from, n);
-        memset(to, 0, n);
-        return n;
+                res = raw_copy_from_user(to, from, n);
+        if (unlikely(res))
+                memset(to + (n - res), 0, res);
+        return res;
 }
 
-#define __copy_from_user(to, from, n) __copy_user_zeroing(to, from, n)
+#define __copy_from_user(to, from, n) raw_copy_from_user(to, from, n)
 #define __copy_from_user_inatomic __copy_from_user
 
 extern unsigned long __must_check __copy_user(void __user *to,
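
Note that __copy_from_user() is now a plain alias for raw_copy_from_user() and performs no zeroing; only copy_from_user() guarantees that the destination holds no stale kernel data after a faulting copy. A hypothetical caller relying on that guarantee is sketched below (assumed kernel context with <linux/uaccess.h>; struct demo_req and demo_read_request are invented for illustration, not part of this patch):

#include <linux/errno.h>
#include <linux/types.h>
#include <linux/uaccess.h>

/* Hypothetical request structure and helper, not taken from this patch. */
struct demo_req {
        u32 op;
        u32 len;
};

static int demo_read_request(struct demo_req *req, const void __user *ubuf)
{
        /*
         * If the copy faults part-way, copy_from_user() (after this patch)
         * has already zeroed the uncopied tail of *req, so the structure
         * never carries stale kernel memory when we bail out.
         */
        if (copy_from_user(req, ubuf, sizeof(*req)))
                return -EFAULT;

        return 0;
}
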
diff --git a/arch/metag/lib/usercopy.c b/arch/metag/lib/usercopy.c
index 714d8562aa20..e1d553872fd7 100644
--- a/arch/metag/lib/usercopy.c
+++ b/arch/metag/lib/usercopy.c
@@ -29,7 +29,6 @@
                 COPY                                             \
                 "1:\n"                                           \
                 "       .section .fixup,\"ax\"\n"                \
-                "       MOV D1Ar1,#0\n"                          \
                 FIXUP                                            \
                 "       MOVT    D1Ar1,#HI(1b)\n"                 \
                 "       JUMP    D1Ar1,#LO(1b)\n"                 \
@@ -637,16 +636,14 @@ EXPORT_SYMBOL(__copy_user);
         __asm_copy_user_cont(to, from, ret,             \
                 "       GETB D1Ar1,[%1++]\n"            \
                 "2:     SETB [%0++],D1Ar1\n",           \
-                "3:     ADD  %2,%2,#1\n"                \
-                "       SETB [%0++],D1Ar1\n",           \
+                "3:     ADD  %2,%2,#1\n",               \
                 "       .long 2b,3b\n")
 
 #define __asm_copy_from_user_2x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
         __asm_copy_user_cont(to, from, ret,             \
                 "       GETW D1Ar1,[%1++]\n"            \
                 "2:     SETW [%0++],D1Ar1\n" COPY,      \
-                "3:     ADD  %2,%2,#2\n"                \
-                "       SETW [%0++],D1Ar1\n" FIXUP,     \
+                "3:     ADD  %2,%2,#2\n" FIXUP,         \
                 "       .long 2b,3b\n" TENTRY)
 
 #define __asm_copy_from_user_2(to, from, ret) \
@@ -656,32 +653,26 @@ EXPORT_SYMBOL(__copy_user);
         __asm_copy_from_user_2x_cont(to, from, ret,     \
                 "       GETB D1Ar1,[%1++]\n"            \
                 "4:     SETB [%0++],D1Ar1\n",           \
-                "5:     ADD  %2,%2,#1\n"                \
-                "       SETB [%0++],D1Ar1\n",           \
+                "5:     ADD  %2,%2,#1\n",               \
                 "       .long 4b,5b\n")
 
 #define __asm_copy_from_user_4x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
         __asm_copy_user_cont(to, from, ret,             \
                 "       GETD D1Ar1,[%1++]\n"            \
                 "2:     SETD [%0++],D1Ar1\n" COPY,      \
-                "3:     ADD  %2,%2,#4\n"                \
-                "       SETD [%0++],D1Ar1\n" FIXUP,     \
+                "3:     ADD  %2,%2,#4\n" FIXUP,         \
                 "       .long 2b,3b\n" TENTRY)
 
 #define __asm_copy_from_user_4(to, from, ret) \
         __asm_copy_from_user_4x_cont(to, from, ret, "", "", "")
 
-
 #define __asm_copy_from_user_8x64(to, from, ret) \
         asm volatile (                                  \
                 "       GETL D0Ar2,D1Ar1,[%1++]\n"      \
                 "2:     SETL [%0++],D0Ar2,D1Ar1\n"      \
                 "1:\n"                                  \
                 "       .section .fixup,\"ax\"\n"       \
-                "       MOV D1Ar1,#0\n"                 \
-                "       MOV D0Ar2,#0\n"                 \
                 "3:     ADD  %2,%2,#8\n"                \
-                "       SETL [%0++],D0Ar2,D1Ar1\n"      \
                 "       MOVT    D0Ar2,#HI(1b)\n"        \
                 "       JUMP    D0Ar2,#LO(1b)\n"        \
                 "       .previous\n"                    \
@@ -721,11 +712,12 @@ EXPORT_SYMBOL(__copy_user);
                 "SUB    %1, %1, #4\n")
 
 
-/* Copy from user to kernel, zeroing the bytes that were inaccessible in
-   userland.  The return-value is the number of bytes that were
-   inaccessible.  */
-unsigned long __copy_user_zeroing(void *pdst, const void __user *psrc,
-                                  unsigned long n)
+/*
+ * Copy from user to kernel. The return-value is the number of bytes that were
+ * inaccessible.
+ */
+unsigned long raw_copy_from_user(void *pdst, const void __user *psrc,
+                                 unsigned long n)
 {
         register char *dst asm ("A0.2") = pdst;
         register const char __user *src asm ("A1.2") = psrc;
@@ -738,7 +730,7 @@ unsigned long __copy_user_zeroing(void *pdst, const void __user *psrc,
                 __asm_copy_from_user_1(dst, src, retn);
                 n--;
                 if (retn)
-                        goto copy_exception_bytes;
+                        return retn + n;
         }
         if ((unsigned long) dst & 1) {
                 /* Worst case - byte copy */
@@ -746,14 +738,14 @@ unsigned long __copy_user_zeroing(void *pdst, const void __user *psrc,
                         __asm_copy_from_user_1(dst, src, retn);
                         n--;
                         if (retn)
-                                goto copy_exception_bytes;
+                                return retn + n;
                 }
         }
         if (((unsigned long) src & 2) && n >= 2) {
                 __asm_copy_from_user_2(dst, src, retn);
                 n -= 2;
                 if (retn)
-                        goto copy_exception_bytes;
+                        return retn + n;
         }
         if ((unsigned long) dst & 2) {
                 /* Second worst case - word copy */
@@ -761,7 +753,7 @@ unsigned long __copy_user_zeroing(void *pdst, const void __user *psrc,
                         __asm_copy_from_user_2(dst, src, retn);
                         n -= 2;
                         if (retn)
-                                goto copy_exception_bytes;
+                                return retn + n;
                 }
         }
 
@@ -777,7 +769,7 @@ unsigned long __copy_user_zeroing(void *pdst, const void __user *psrc,
                         __asm_copy_from_user_8x64(dst, src, retn);
                         n -= 8;
                         if (retn)
-                                goto copy_exception_bytes;
+                                return retn + n;
                 }
         }
 
@@ -793,7 +785,7 @@ unsigned long __copy_user_zeroing(void *pdst, const void __user *psrc,
                         __asm_copy_from_user_8x64(dst, src, retn);
                         n -= 8;
                         if (retn)
-                                goto copy_exception_bytes;
+                                return retn + n;
                 }
         }
 #endif
@@ -803,7 +795,7 @@ unsigned long __copy_user_zeroing(void *pdst, const void __user *psrc,
                 n -= 4;
 
                 if (retn)
-                        goto copy_exception_bytes;
+                        return retn + n;
         }
 
         /* If we get here, there were no memory read faults.  */
@@ -829,21 +821,8 @@ unsigned long __copy_user_zeroing(void *pdst, const void __user *psrc,
         /* If we get here, retn correctly reflects the number of failing
            bytes.  */
         return retn;
-
- copy_exception_bytes:
-        /* We already have "retn" bytes cleared, and need to clear the
-           remaining "n" bytes.  A non-optimized simple byte-for-byte in-line
-           memset is preferred here, since this isn't speed-critical code and
-           we'd rather have this a leaf-function than calling memset.  */
-        {
-                char *endp;
-                for (endp = dst + n; dst < endp; dst++)
-                        *dst = 0;
-        }
-
-        return retn + n;
 }
-EXPORT_SYMBOL(__copy_user_zeroing);
+EXPORT_SYMBOL(raw_copy_from_user);
 
 #define __asm_clear_8x64(to, ret) \
         asm volatile (                                  \