path: root/arch/mips/lib/memcpy.S
author		Leonid Yegoshin <Leonid.Yegoshin@imgtec.com>	2014-11-14 06:55:50 -0500
committer	Markos Chandras <markos.chandras@imgtec.com>	2015-02-17 10:37:29 -0500
commit		b0ce4bd535a68e5814b8470f1f8a49771f37b0a2 (patch)
tree		3ffd6805a81c1b5e36a027cb26587fde3ac52e84 /arch/mips/lib/memcpy.S
parent		fee313d4b880d4f68cd9d1ed013b128f836d3f21 (diff)
MIPS: lib: memcpy: Add MIPS R6 support
MIPS R6 does not support the unaligned load and store instructions, so we add a special MIPS R6 case that copies one byte at a time whenever we need to read from or write to an unaligned memory address.

Signed-off-by: Leonid Yegoshin <Leonid.Yegoshin@imgtec.com>
Signed-off-by: Markos Chandras <markos.chandras@imgtec.com>
Diffstat (limited to 'arch/mips/lib/memcpy.S')
-rw-r--r--	arch/mips/lib/memcpy.S	23
1 file changed, 23 insertions(+), 0 deletions(-)
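As a rough guide to the behaviour described in the commit message, here is a minimal C sketch: on MIPS R6 the unaligned load/store instructions are unavailable, so when either pointer is misaligned the copy falls back to one byte at a time. The function name r6_memcpy_sketch and the ADDR_MASK macro are illustrative only and do not appear in the kernel source.

#include <stddef.h>
#include <stdint.h>

#define ADDR_MASK (sizeof(long) - 1)	/* stand-in for ADDRMASK in memcpy.S */

/* Hypothetical C equivalent of the dispatch this patch adds for R6. */
static void *r6_memcpy_sketch(void *dst, const void *src, size_t len)
{
	unsigned char *d = dst;
	const unsigned char *s = src;

	if (((uintptr_t)d | (uintptr_t)s) & ADDR_MASK) {
		/* R6: no unaligned load/store instructions, copy bytewise. */
		while (len--)
			*d++ = *s++;
		return dst;
	}

	/* Both pointers aligned: copy a word at a time, then the tail. */
	while (len >= sizeof(long)) {
		*(long *)d = *(const long *)s;
		d += sizeof(long);
		s += sizeof(long);
		len -= sizeof(long);
	}
	while (len--)
		*d++ = *s++;
	return dst;
}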
diff --git a/arch/mips/lib/memcpy.S b/arch/mips/lib/memcpy.S
index 5d3238af9b5c..9245e1705e69 100644
--- a/arch/mips/lib/memcpy.S
+++ b/arch/mips/lib/memcpy.S
@@ -293,9 +293,14 @@
 	and	t0, src, ADDRMASK
 	PREFS(	0, 2*32(src) )
 	PREFD(	1, 2*32(dst) )
+#ifndef CONFIG_CPU_MIPSR6
 	bnez	t1, .Ldst_unaligned\@
 	 nop
 	bnez	t0, .Lsrc_unaligned_dst_aligned\@
+#else
+	or	t0, t0, t1
+	bnez	t0, .Lcopy_unaligned_bytes\@
+#endif
 	/*
 	 * use delay slot for fall-through
 	 * src and dst are aligned; need to compute rem
@@ -376,6 +381,7 @@
 	bne	rem, len, 1b
 	.set	noreorder
 
+#ifndef CONFIG_CPU_MIPSR6
 	/*
 	 * src and dst are aligned, need to copy rem bytes (rem < NBYTES)
 	 * A loop would do only a byte at a time with possible branch
@@ -477,6 +483,7 @@
 	bne	len, rem, 1b
 	.set	noreorder
 
+#endif /* !CONFIG_CPU_MIPSR6 */
 .Lcopy_bytes_checklen\@:
 	beqz	len, .Ldone\@
 	 nop
@@ -504,6 +511,22 @@
 .Ldone\@:
 	jr	ra
 	 nop
+
+#ifdef CONFIG_CPU_MIPSR6
+.Lcopy_unaligned_bytes\@:
+1:
+	COPY_BYTE(0)
+	COPY_BYTE(1)
+	COPY_BYTE(2)
+	COPY_BYTE(3)
+	COPY_BYTE(4)
+	COPY_BYTE(5)
+	COPY_BYTE(6)
+	COPY_BYTE(7)
+	ADD	src, src, 8
+	b	1b
+	 ADD	dst, dst, 8
+#endif /* CONFIG_CPU_MIPSR6 */
 	.if __memcpy == 1
 	END(memcpy)
 	.set	__memcpy, 0
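The new .Lcopy_unaligned_bytes path above is unrolled eight bytes per iteration. The COPY_BYTE macro it relies on is not shown in this hunk; it is assumed here to copy a single byte at the given offset, decrement len, and leave the loop once len reaches zero. A hedged C approximation of that loop structure, with copy_byte_step standing in for the macro:

#include <stddef.h>

/* Illustrative stand-in for COPY_BYTE(N): copy one byte at offset N and
 * report whether the whole copy is finished.  Not the kernel macro itself.
 */
static int copy_byte_step(unsigned char *d, const unsigned char *s,
			  size_t *len, int n)
{
	d[n] = s[n];
	return --(*len) == 0;
}

/* Sketch of the unrolled byte-at-a-time loop added for CONFIG_CPU_MIPSR6. */
static void copy_unaligned_bytes(unsigned char *d, const unsigned char *s,
				 size_t len)
{
	if (len == 0)
		return;

	for (;;) {
		for (int n = 0; n < 8; n++)
			if (copy_byte_step(d, s, &len, n))
				return;
		s += 8;		/* mirrors "ADD src, src, 8" */
		d += 8;		/* mirrors "ADD dst, dst, 8" in the delay slot */
	}
}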