aboutsummaryrefslogtreecommitdiffstats
path: root/arch/metag
diff options
context:
space:
mode:
authorJames Hogan <james.hogan@imgtec.com>2017-04-03 12:41:40 -0400
committerGreg Kroah-Hartman <gregkh@linuxfoundation.org>2017-04-12 06:41:13 -0400
commitce154d517ae45fe20c62a0c563e9b0858519259f (patch)
treee0ca149f11c8a1bfc9a8211f5ca3a69f24785896 /arch/metag
parent4f3f0dd2a75b3a25198492cd815ea82de9cef8a7 (diff)
metag/usercopy: Fix src fixup in from user rapf loops
commit 2c0b1df88b987a12d95ea1d6beaf01894f3cc725 upstream. The fixup code to rewind the source pointer in __asm_copy_from_user_{32,64}bit_rapf_loop() always rewound the source by a single unit (4 or 8 bytes), however this is insufficient if the fault didn't occur on the first load in the loop, as the source pointer will have been incremented but nothing will have been stored until all 4 register [pairs] are loaded. Read the LSM_STEP field of TXSTATUS (which is already loaded into a register), a bit like the copy_to_user versions, to determine how many iterations of MGET[DL] have taken place, all of which need rewinding. Fixes: 373cd784d0fc ("metag: Memory handling") Signed-off-by: James Hogan <james.hogan@imgtec.com> Cc: linux-metag@vger.kernel.org Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Diffstat (limited to 'arch/metag')
-rw-r--r--arch/metag/lib/usercopy.c36
1 files changed, 28 insertions, 8 deletions
diff --git a/arch/metag/lib/usercopy.c b/arch/metag/lib/usercopy.c
index 4422928a1746..e09c95ba028c 100644
--- a/arch/metag/lib/usercopy.c
+++ b/arch/metag/lib/usercopy.c
@@ -687,29 +687,49 @@ EXPORT_SYMBOL(__copy_user);
  *
  * Rationale:
  *	A fault occurs while reading from user buffer, which is the
- *	source. Since the fault is at a single address, we only
- *	need to rewind by 8 bytes.
+ *	source.
  *	Since we don't write to kernel buffer until we read first,
  *	the kernel buffer is at the right state and needn't be
- *	corrected.
+ *	corrected, but the source must be rewound to the beginning of
+ *	the block, which is LSM_STEP*8 bytes.
+ *	LSM_STEP is bits 10:8 in TXSTATUS which is already read
+ *	and stored in D0Ar2
+ *
+ *	NOTE: If a fault occurs at the last operation in M{G,S}ETL
+ *	LSM_STEP will be 0. ie: we do 4 writes in our case, if
+ *	a fault happens at the 4th write, LSM_STEP will be 0
+ *	instead of 4. The code copes with that.
  */
 #define __asm_copy_from_user_64bit_rapf_loop(to, from, ret, n, id)	\
 	__asm_copy_user_64bit_rapf_loop(to, from, ret, n, id,		\
-		"SUB	%1, %1, #8\n")
+		"LSR	D0Ar2, D0Ar2, #5\n"				\
+		"ANDS	D0Ar2, D0Ar2, #0x38\n"				\
+		"ADDZ	D0Ar2, D0Ar2, #32\n"				\
+		"SUB	%1, %1, D0Ar2\n")
 
 /*	rewind 'from' pointer when a fault occurs
  *
  * Rationale:
  *	A fault occurs while reading from user buffer, which is the
- *	source. Since the fault is at a single address, we only
- *	need to rewind by 4 bytes.
+ *	source.
  *	Since we don't write to kernel buffer until we read first,
  *	the kernel buffer is at the right state and needn't be
- *	corrected.
+ *	corrected, but the source must be rewound to the beginning of
+ *	the block, which is LSM_STEP*4 bytes.
+ *	LSM_STEP is bits 10:8 in TXSTATUS which is already read
+ *	and stored in D0Ar2
+ *
+ *	NOTE: If a fault occurs at the last operation in M{G,S}ETL
+ *	LSM_STEP will be 0. ie: we do 4 writes in our case, if
+ *	a fault happens at the 4th write, LSM_STEP will be 0
+ *	instead of 4. The code copes with that.
  */
 #define __asm_copy_from_user_32bit_rapf_loop(to, from, ret, n, id)	\
 	__asm_copy_user_32bit_rapf_loop(to, from, ret, n, id,		\
-		"SUB	%1, %1, #4\n")
+		"LSR	D0Ar2, D0Ar2, #6\n"				\
+		"ANDS	D0Ar2, D0Ar2, #0x1c\n"				\
+		"ADDZ	D0Ar2, D0Ar2, #16\n"				\
+		"SUB	%1, %1, D0Ar2\n")
 
 
 /*