summary refs log tree commit diff stats
path: root/arch/alpha/lib
diff options
context:
space:
mode:
authorMichael Cree <mcree@orcon.net.nz>2018-01-03 03:58:00 -0500
committerMatt Turner <mattst88@gmail.com>2018-01-16 22:34:46 -0500
commit0d83620fd18e5b8d79d390e482583b379a6a986d (patch)
tree455377fa088b09c5943138a2fcb747645250156a /arch/alpha/lib
parent8cbab92dff778e516064c13113ca15d4869ec883 (diff)
alpha: extend memset16 to EV6 optimised routines
Commit 92ce4c3ea7c4, "alpha: add support for memset16", renamed the function memsetw() to be memset16() but neglected to do this for the EV6 optimised version, thus when building a kernel optimised for EV6 (or later) link errors result. This extends the memset16 support to EV6. Signed-off-by: Michael Cree <mcree@orcon.net.nz> Signed-off-by: Matt Turner <mattst88@gmail.com>
Diffstat (limited to 'arch/alpha/lib')
-rw-r--r--arch/alpha/lib/ev6-memset.S12
1 file changed, 6 insertions, 6 deletions
diff --git a/arch/alpha/lib/ev6-memset.S b/arch/alpha/lib/ev6-memset.S
index 316a99aa9efe..1cfcfbbea6f0 100644
--- a/arch/alpha/lib/ev6-memset.S
+++ b/arch/alpha/lib/ev6-memset.S
@@ -18,7 +18,7 @@
18 * The algorithm for the leading and trailing quadwords remains the same, 18 * The algorithm for the leading and trailing quadwords remains the same,
19 * however the loop has been unrolled to enable better memory throughput, 19 * however the loop has been unrolled to enable better memory throughput,
20 * and the code has been replicated for each of the entry points: __memset 20 * and the code has been replicated for each of the entry points: __memset
21 * and __memsetw to permit better scheduling to eliminate the stalling 21 * and __memset16 to permit better scheduling to eliminate the stalling
22 * encountered during the mask replication. 22 * encountered during the mask replication.
23 * A future enhancement might be to put in a byte store loop for really 23 * A future enhancement might be to put in a byte store loop for really
24 * small (say < 32 bytes) memset()s. Whether or not that change would be 24 * small (say < 32 bytes) memset()s. Whether or not that change would be
@@ -34,7 +34,7 @@
34 .globl memset 34 .globl memset
35 .globl __memset 35 .globl __memset
36 .globl ___memset 36 .globl ___memset
37 .globl __memsetw 37 .globl __memset16
38 .globl __constant_c_memset 38 .globl __constant_c_memset
39 39
40 .ent ___memset 40 .ent ___memset
@@ -415,9 +415,9 @@ end:
415 * to mask stalls. Note that entry point names also had to change 415 * to mask stalls. Note that entry point names also had to change
416 */ 416 */
417 .align 5 417 .align 5
418 .ent __memsetw 418 .ent __memset16
419 419
420__memsetw: 420__memset16:
421 .frame $30,0,$26,0 421 .frame $30,0,$26,0
422 .prologue 0 422 .prologue 0
423 423
@@ -596,8 +596,8 @@ end_w:
596 nop 596 nop
597 ret $31,($26),1 # L0 : 597 ret $31,($26),1 # L0 :
598 598
599 .end __memsetw 599 .end __memset16
600 EXPORT_SYMBOL(__memsetw) 600 EXPORT_SYMBOL(__memset16)
601 601
602memset = ___memset 602memset = ___memset
603__memset = ___memset 603__memset = ___memset