author		Paul Mundt <lethal@linux-sh.org>	2009-09-08 03:23:08 -0400
committer	Paul Mundt <lethal@linux-sh.org>	2009-09-08 03:23:08 -0400
commit		a2494b9b5fb702becaf8d8e3138f7a1a0d3c537e
tree		53e7670594825b2c558a9ca7b993670b259a7374
parent		6e4154d4c2dd3d7e61d19ddd2527322ce34c2f5a
sh: Kill off dcache writeback from copy_page().
Now that the cache purging is handled manually by all copy_page() callers,
we can kill off copy_page()'s own dcache writeback. This optimizes the
non-aliasing case.

Signed-off-by: Paul Mundt <lethal@linux-sh.org>
-rw-r--r--	arch/sh/lib/copy_page.S	11
1 file changed, 3 insertions(+), 8 deletions(-)
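The rationale, roughly: copy_page() used to write back each destination
cache line as it went (the ocbwb in the loop below), which the non-aliasing
case never needed. With the flush moved into the callers, the caller-side
pattern looks something like this C sketch. The helpers kmap_may_alias()
and dcache_purge_range() are hypothetical stand-ins, not the actual SH
cache API:

	#include <linux/mm.h>		/* copy_page(), PAGE_SIZE */

	/* Hypothetical helpers, for illustration only. */
	extern int  kmap_may_alias(unsigned long uvaddr, void *kvaddr);
	extern void dcache_purge_range(void *start, unsigned long len);

	/*
	 * Sketch: the copy itself is cache-agnostic, and the caller
	 * purges the destination only when the user/kernel mappings
	 * can alias in the dcache.
	 */
	static void copy_user_page_sketch(void *to, void *from,
					  unsigned long uvaddr)
	{
		copy_page(to, from);	/* no per-line writeback inside */

		if (kmap_may_alias(uvaddr, to))
			dcache_purge_range(to, PAGE_SIZE);
	}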
diff --git a/arch/sh/lib/copy_page.S b/arch/sh/lib/copy_page.S
index 43de7e8e4e17..9d7b8bc51866 100644
--- a/arch/sh/lib/copy_page.S
+++ b/arch/sh/lib/copy_page.S
@@ -30,7 +30,9 @@ ENTRY(copy_page)
 	mov	r4,r10
 	mov	r5,r11
 	mov	r5,r8
-	mov.l	.Lpsz,r0
+	mov	#(PAGE_SIZE >> 10), r0
+	shll8	r0
+	shll2	r0
 	add	r0,r8
 	!
 1:	mov.l	@r11+,r0
@@ -43,7 +45,6 @@ ENTRY(copy_page)
 	mov.l	@r11+,r7
 #if defined(CONFIG_CPU_SH4)
 	movca.l	r0,@r10
-	mov	r10,r0
 #else
 	mov.l	r0,@r10
 #endif
@@ -55,9 +56,6 @@ ENTRY(copy_page)
 	mov.l	r3,@-r10
 	mov.l	r2,@-r10
 	mov.l	r1,@-r10
-#if defined(CONFIG_CPU_SH4)
-	ocbwb	@r0
-#endif
 	cmp/eq	r11,r8
 	bf/s	1b
 	 add	#28,r10
@@ -68,9 +66,6 @@ ENTRY(copy_page)
 	rts
 	 nop
 
-	.balign 4
-.Lpsz:	.long	PAGE_SIZE
-
 /*
  * __kernel_size_t __copy_user(void *to, const void *from, __kernel_size_t n);
  * Return the number of bytes NOT copied
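A side note on the first hunk: SH's "mov #imm, Rn" only takes a
sign-extended 8-bit immediate, so PAGE_SIZE itself cannot be loaded
directly. The new code loads PAGE_SIZE >> 10 (4 for 4 KiB pages, which
fits) and shifts it back up with shll8 and shll2 (8 + 2 = 10 bit
positions). Dropping the literal-pool load of .Lpsz is also why the
.balign 4 / .Lpsz lines disappear. A quick check of the arithmetic in C,
assuming the common 4 KiB page size:

	#include <assert.h>

	#define PAGE_SIZE 4096UL	/* assumed: typical SH page size */

	int main(void)
	{
		/* mov #(PAGE_SIZE >> 10), r0  ->  8-bit immediate (4) */
		unsigned long r0 = PAGE_SIZE >> 10;

		r0 <<= 8;	/* shll8 r0 */
		r0 <<= 2;	/* shll2 r0 */

		assert(r0 == PAGE_SIZE);	/* full page size recovered */
		return 0;
	}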