author     James Hogan <james.hogan@imgtec.com>             2017-03-31 06:23:18 -0400
committer  Greg Kroah-Hartman <gregkh@linuxfoundation.org>  2017-04-12 06:41:13 -0400
commit     49a292dcd86bd04794cfc50f87ff5aa4444df088 (patch)
tree       be5bbfdd1f8c78763ddf6c04fd5a3a29bad76e58 /arch/metag
parent     2bb52b47e7f420bbed4192efca20a9de8ff4fa08 (diff)
metag/usercopy: Fix alignment error checking
commit 2257211942bbbf6c798ab70b487d7e62f7835a1a upstream.
Fix the error checking of the alignment adjustment code in
raw_copy_from_user(), which mistakenly considers it safe to skip the
error check when aligning the source buffer on a 2 or 4 byte boundary.
If the destination buffer was unaligned it may have started to copy
using byte or word accesses, which could well be at the start of a new
(valid) source page. This would result in it appearing to have copied 1
or 2 bytes at the end of the first (invalid) page rather than none at
all.
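To make the fixed control flow easier to follow, here is a minimal host-side sketch of the pattern the patch enforces: check the fault indicator immediately after each alignment step rather than once after all of them. This is only an illustration under simplifying assumptions; struct user_src, step() and copy_prologue() are invented for this sketch, the real __asm_copy_from_user_{1,2}() helpers are metag assembly macros in arch/metag/lib/usercopy.c, and the real exception path (copy_exception_bytes) does more precise byte accounting than shown here.

#include <stdint.h>
#include <string.h>

/* Toy fault model standing in for a user page: valid == 0 means any
 * access to it "faults". Invented for illustration only. */
struct user_src {
	const char *p;
	int valid;
};

/* Stand-in for __asm_copy_from_user_N(): copies len bytes, or records a
 * fault in *retn and copies nothing. */
static void step(char **dst, struct user_src *src, unsigned long len,
		 unsigned long *retn)
{
	if (!src->valid) {
		*retn = len;
		return;
	}
	memcpy(*dst, src->p, len);
	*dst += len;
	src->p += len;
}

/* Alignment prologue with the fix applied: a fault while aligning the
 * source is reported at once, so later successful accesses can never make
 * the first (invalid) page look partially copied. Returns bytes NOT copied. */
unsigned long copy_prologue(char *dst, struct user_src *src, unsigned long n)
{
	unsigned long retn = 0;

	if ((uintptr_t)src->p & 1) {
		step(&dst, src, 1, &retn);
		if (retn)		/* the fix: check after *each* step */
			return n;	/* nothing was copied */
		n--;
	}
	if (((uintptr_t)src->p & 2) && n >= 2) {
		step(&dst, src, 2, &retn);
		if (retn)
			return n;
		n -= 2;
	}
	/* ... 4-byte alignment step and the main copy loop would follow ... */
	return n;
}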
Fixes: 373cd784d0fc ("metag: Memory handling")
Signed-off-by: James Hogan <james.hogan@imgtec.com>
Cc: linux-metag@vger.kernel.org
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Diffstat (limited to 'arch/metag')
 arch/metag/lib/usercopy.c | 10 ++++------
 1 file changed, 4 insertions(+), 6 deletions(-)
diff --git a/arch/metag/lib/usercopy.c b/arch/metag/lib/usercopy.c
index b4eb1f17069f..a6ced9691ddb 100644
--- a/arch/metag/lib/usercopy.c
+++ b/arch/metag/lib/usercopy.c
@@ -717,6 +717,8 @@ unsigned long __copy_user_zeroing(void *pdst, const void __user *psrc,
 	if ((unsigned long) src & 1) {
 		__asm_copy_from_user_1(dst, src, retn);
 		n--;
+		if (retn)
+			goto copy_exception_bytes;
 	}
 	if ((unsigned long) dst & 1) {
 		/* Worst case - byte copy */
@@ -730,6 +732,8 @@ unsigned long __copy_user_zeroing(void *pdst, const void __user *psrc,
 	if (((unsigned long) src & 2) && n >= 2) {
 		__asm_copy_from_user_2(dst, src, retn);
 		n -= 2;
+		if (retn)
+			goto copy_exception_bytes;
 	}
 	if ((unsigned long) dst & 2) {
 		/* Second worst case - word copy */
@@ -741,12 +745,6 @@ unsigned long __copy_user_zeroing(void *pdst, const void __user *psrc,
 		}
 	}
 
-	/* We only need one check after the unalignment-adjustments,
-	   because if both adjustments were done, either both or
-	   neither reference had an exception. */
-	if (retn != 0)
-		goto copy_exception_bytes;
-
 #ifdef USE_RAPF
 	/* 64 bit copy loop */
 	if (!(((unsigned long) src | (unsigned long) dst) & 7)) {
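For context on why the exact count matters, a hedged caller-side sketch follows. The copy_from_user() contract of returning the number of bytes it could not copy is standard kernel behaviour, but fetch_prefix(), buf, ubuf and len are made up for this example. With the pre-fix behaviour described above, a misaligned source that faults on its very first access could make the computed prefix come out as 1 or 2 bytes instead of 0.

#include <linux/types.h>
#include <linux/uaccess.h>

/* Hypothetical caller that converts "bytes not copied" into "bytes now
 * valid in buf". It is only correct if the arch copy routine reports the
 * uncopied tail exactly. */
static size_t fetch_prefix(char *buf, const char __user *ubuf, size_t len)
{
	unsigned long not_copied = copy_from_user(buf, ubuf, len);

	return len - not_copied;
}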