path: root/arch/sh/mm/copy_page.S
author    Paul Mundt <lethal@linux-sh.org>    2006-11-26 22:06:26 -0500
committer Paul Mundt <lethal@linux-sh.org>    2006-12-05 20:45:39 -0500
commit    510c72ad2dd4e05e6908755f51ac89482c6eb987 (patch)
tree      fa2e9e9a674e38dd523d937329627560f0bd6b64 /arch/sh/mm/copy_page.S
parent    1dc41e58a553e612e3d0349bb60eef08f9462bde (diff)
sh: Fixup various PAGE_SIZE == 4096 assumptions.
There were a number of places that made evil PAGE_SIZE == 4k assumptions that ended up breaking when trying to play with 8k and 64k page sizes; this fixes those up.

The most significant change is the way we load THREAD_SIZE. Previously this was done via:

	mov	#(THREAD_SIZE >> 8), reg
	shll8	reg

to avoid a memory access and allow the immediate load. With a 64k PAGE_SIZE, we're out of range for the immediate load size without resorting to special instructions available in later ISAs (movi20s and so on). The "workaround" for this is to bump up the shift to 10 and insert a shll2, which gives a bit more flexibility while still being much cheaper than a memory access.

Signed-off-by: Paul Mundt <lethal@linux-sh.org>
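A minimal sketch of the before/after sequences the message describes (register name and comments are illustrative, not taken from the patch): mov #imm loads a sign-extended 8-bit immediate, so the operand must fit in 127.

	! before: THREAD_SIZE >> 8 must fit in the 8-bit signed immediate
	mov	#(THREAD_SIZE >> 8), reg
	shll8	reg			! reg = THREAD_SIZE

	! after: shifting by 10 (shll8 + shll2) keeps the immediate in
	! range even when THREAD_SIZE grows with a 64k PAGE_SIZE
	mov	#(THREAD_SIZE >> 10), reg
	shll8	reg
	shll2	reg			! reg = THREAD_SIZE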
Diffstat (limited to 'arch/sh/mm/copy_page.S')
-rw-r--r--    arch/sh/mm/copy_page.S    16
1 file changed, 8 insertions(+), 8 deletions(-)
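The diff below applies the same theme to copy_page.S itself: the page size constant was a 16-bit literal (.word 4096) loaded with mov.w, which sign-extends and therefore cannot represent a 64k PAGE_SIZE (0x10000), so it becomes a 32-bit .long loaded with mov.l, and <asm/page.h> is pulled in for the PAGE_SIZE definition. The resulting pattern, in sketch form (labels as in the patch):

	mov.l	.Lpsz,r0	! r0 = PAGE_SIZE, 32-bit literal pool load
	add	r0,r8		! r8 = from + PAGE_SIZE
	...
.Lpsz:	.long	PAGE_SIZE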
diff --git a/arch/sh/mm/copy_page.S b/arch/sh/mm/copy_page.S
index 1addffe117c3..397c94c97315 100644
--- a/arch/sh/mm/copy_page.S
+++ b/arch/sh/mm/copy_page.S
@@ -1,12 +1,12 @@
-/* $Id: copy_page.S,v 1.8 2003/08/25 17:03:10 lethal Exp $
- *
+/*
  * copy_page, __copy_user_page, __copy_user implementation of SuperH
  *
  * Copyright (C) 2001 Niibe Yutaka & Kaz Kojima
  * Copyright (C) 2002 Toshinobu Sugioka
- *
+ * Copyright (C) 2006 Paul Mundt
  */
 #include <linux/linkage.h>
+#include <asm/page.h>
 
 /*
  * copy_page_slow
@@ -18,7 +18,7 @@
 
 /*
  * r0, r1, r2, r3, r4, r5, r6, r7 --- scratch
- * r8 --- from + 4096
+ * r8 --- from + PAGE_SIZE
  * r9 --- not used
  * r10 --- to
  * r11 --- from
@@ -30,7 +30,7 @@ ENTRY(copy_page_slow)
 	mov	r4,r10
 	mov	r5,r11
 	mov	r5,r8
-	mov.w	.L4096,r0
+	mov.l	.Lpsz,r0
 	add	r0,r8
 	!
 1:	mov.l	@r11+,r0
@@ -80,7 +80,7 @@ ENTRY(copy_page_slow)
 
 /*
  * r0, r1, r2, r3, r4, r5, r6, r7 --- scratch
- * r8 --- from + 4096
+ * r8 --- from + PAGE_SIZE
  * r9 --- orig_to
  * r10 --- to
  * r11 --- from
@@ -94,7 +94,7 @@ ENTRY(__copy_user_page)
 	mov	r5,r11
 	mov	r6,r9
 	mov	r5,r8
-	mov.w	.L4096,r0
+	mov.l	.Lpsz,r0
 	add	r0,r8
 	!
 1:	ocbi	@r9
@@ -129,7 +129,7 @@ ENTRY(__copy_user_page)
 	rts
 	nop
 #endif
-.L4096:	.word	4096
+.Lpsz:	.long	PAGE_SIZE
 /*
  * __kernel_size_t __copy_user(void *to, const void *from, __kernel_size_t n);
  * Return the number of bytes NOT copied