author		Trond Myklebust <Trond.Myklebust@netapp.com>	2006-12-07 15:48:15 -0500
committer	Trond Myklebust <Trond.Myklebust@netapp.com>	2006-12-07 15:48:15 -0500
commit		34161db6b14d984fb9b06c735b7b42f8803f6851 (patch)
tree		99656278b6697f1cde5b05894b7c0ee22c63a00e /arch/sh/mm
parent		5847e1f4d058677c5e46dc6c3e3c70e8855ea3ba (diff)
parent		620034c84d1d939717bdfbe02c51a3fee43541c3 (diff)
Merge branch 'master' of /home/trondmy/kernel/linux-2.6/ into merge_linus
Conflicts:

	include/linux/sunrpc/xprt.h
	net/sunrpc/xprtsock.c

Fix up conflicts with the workqueue changes.
Diffstat (limited to 'arch/sh/mm')
-rw-r--r--	arch/sh/mm/Kconfig	77
-rw-r--r--	arch/sh/mm/cache-sh2.c	69
-rw-r--r--	arch/sh/mm/cache-sh4.c	18
-rw-r--r--	arch/sh/mm/clear_page.S	18
-rw-r--r--	arch/sh/mm/copy_page.S	16
-rw-r--r--	arch/sh/mm/fault.c	161
-rw-r--r--	arch/sh/mm/init.c	45
-rw-r--r--	arch/sh/mm/ioremap.c	4
-rw-r--r--	arch/sh/mm/pg-dma.c	2
-rw-r--r--	arch/sh/mm/pg-sh4.c	35
10 files changed, 223 insertions, 222 deletions
diff --git a/arch/sh/mm/Kconfig b/arch/sh/mm/Kconfig
index 9dd606464d23..4e0362f50384 100644
--- a/arch/sh/mm/Kconfig
+++ b/arch/sh/mm/Kconfig
@@ -4,8 +4,12 @@ menu "Processor selection"
 # Processor families
 #
 config CPU_SH2
+	select SH_WRITETHROUGH if !CPU_SH2A
 	bool
-	select SH_WRITETHROUGH
+
+config CPU_SH2A
+	bool
+	select CPU_SH2
 
 config CPU_SH3
 	bool
@@ -16,6 +20,7 @@ config CPU_SH4
 	bool
 	select CPU_HAS_INTEVT
 	select CPU_HAS_SR_RB
+	select CPU_HAS_PTEA if !CPU_SUBTYPE_ST40
 
 config CPU_SH4A
 	bool
@@ -40,6 +45,16 @@ config CPU_SUBTYPE_SH7604
 	bool "Support SH7604 processor"
 	select CPU_SH2
 
+config CPU_SUBTYPE_SH7619
+	bool "Support SH7619 processor"
+	select CPU_SH2
+
+comment "SH-2A Processor Support"
+
+config CPU_SUBTYPE_SH7206
+	bool "Support SH7206 processor"
+	select CPU_SH2A
+
 comment "SH-3 Processor Support"
 
 config CPU_SUBTYPE_SH7300
@@ -89,6 +104,7 @@ comment "SH-4 Processor Support"
 config CPU_SUBTYPE_SH7750
 	bool "Support SH7750 processor"
 	select CPU_SH4
+	select CPU_HAS_IPR_IRQ
 	help
 	  Select SH7750 if you have a 200 Mhz SH-4 HD6417750 CPU.
 
@@ -104,15 +120,18 @@ config CPU_SUBTYPE_SH7750R
 	bool "Support SH7750R processor"
 	select CPU_SH4
 	select CPU_SUBTYPE_SH7750
+	select CPU_HAS_IPR_IRQ
 
 config CPU_SUBTYPE_SH7750S
 	bool "Support SH7750S processor"
 	select CPU_SH4
 	select CPU_SUBTYPE_SH7750
+	select CPU_HAS_IPR_IRQ
 
 config CPU_SUBTYPE_SH7751
 	bool "Support SH7751 processor"
 	select CPU_SH4
+	select CPU_HAS_IPR_IRQ
 	help
 	  Select SH7751 if you have a 166 Mhz SH-4 HD6417751 CPU,
 	  or if you have a HD6417751R CPU.
@@ -121,6 +140,7 @@ config CPU_SUBTYPE_SH7751R
 	bool "Support SH7751R processor"
 	select CPU_SH4
 	select CPU_SUBTYPE_SH7751
+	select CPU_HAS_IPR_IRQ
 
 config CPU_SUBTYPE_SH7760
 	bool "Support SH7760 processor"
@@ -157,6 +177,11 @@ config CPU_SUBTYPE_SH7780
 	select CPU_SH4A
 	select CPU_HAS_INTC2_IRQ
 
+config CPU_SUBTYPE_SH7785
+	bool "Support SH7785 processor"
+	select CPU_SH4A
+	select CPU_HAS_INTC2_IRQ
+
 comment "SH4AL-DSP Processor Support"
 
 config CPU_SUBTYPE_SH73180
@@ -216,13 +241,22 @@ config MEMORY_SIZE
 
 config 32BIT
 	bool "Support 32-bit physical addressing through PMB"
-	depends on CPU_SH4A && MMU
+	depends on CPU_SH4A && MMU && (!X2TLB || BROKEN)
 	default y
 	help
 	  If you say Y here, physical addressing will be extended to
 	  32-bits through the SH-4A PMB. If this is not set, legacy
 	  29-bit physical addressing will be used.
 
+config X2TLB
+	bool "Enable extended TLB mode"
+	depends on CPU_SUBTYPE_SH7785 && MMU && EXPERIMENTAL
+	help
+	  Selecting this option will enable the extended mode of the SH-X2
+	  TLB. For legacy SH-X behaviour and interoperability, say N. For
+	  all of the fun new features and a willingness to submit bug reports,
+	  say Y.
+
 config VSYSCALL
 	bool "Support vsyscall page"
 	depends on MMU
@@ -237,16 +271,52 @@ config VSYSCALL
 	  (the default value) say Y.
 
 choice
+	prompt "Kernel page size"
+	default PAGE_SIZE_4KB
+
+config PAGE_SIZE_4KB
+	bool "4kB"
+	help
+	  This is the default page size used by all SuperH CPUs.
+
+config PAGE_SIZE_8KB
+	bool "8kB"
+	depends on EXPERIMENTAL && X2TLB
+	help
+	  This enables 8kB pages as supported by SH-X2 and later MMUs.
+
+config PAGE_SIZE_64KB
+	bool "64kB"
+	depends on EXPERIMENTAL && CPU_SH4
+	help
+	  This enables support for 64kB pages, possible on all SH-4
+	  CPUs and later. Highly experimental, not recommended.
+
+endchoice
+
+choice
 	prompt "HugeTLB page size"
 	depends on HUGETLB_PAGE && CPU_SH4 && MMU
 	default HUGETLB_PAGE_SIZE_64K
 
 config HUGETLB_PAGE_SIZE_64K
-	bool "64K"
+	bool "64kB"
+
+config HUGETLB_PAGE_SIZE_256K
+	bool "256kB"
+	depends on X2TLB
 
 config HUGETLB_PAGE_SIZE_1MB
 	bool "1MB"
 
+config HUGETLB_PAGE_SIZE_4MB
+	bool "4MB"
+	depends on X2TLB
+
+config HUGETLB_PAGE_SIZE_64MB
+	bool "64MB"
+	depends on X2TLB
+
 endchoice
 
 source "mm/Kconfig"
@@ -274,7 +344,6 @@ config SH_DIRECT_MAPPED
 
 config SH_WRITETHROUGH
 	bool "Use write-through caching"
-	default y if CPU_SH2
 	help
 	  Selecting this option will configure the caches in write-through
 	  mode, as opposed to the default write-back configuration.
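
The new "Kernel page size" choice above only defines CONFIG_PAGE_SIZE_* symbols; it is the architecture's page.h that turns the selected symbol into PAGE_SHIFT and PAGE_SIZE. That header is not part of this diff, so the following is only a sketch of what the consumer side plausibly looks like:

/* Hypothetical sketch of how the CONFIG_PAGE_SIZE_* symbols are
 * consumed; the real include/asm-sh/page.h may differ in detail. */
#if defined(CONFIG_PAGE_SIZE_4KB)
# define PAGE_SHIFT	12
#elif defined(CONFIG_PAGE_SIZE_8KB)
# define PAGE_SHIFT	13
#elif defined(CONFIG_PAGE_SIZE_64KB)
# define PAGE_SHIFT	16
#else
# error "Unknown kernel page size"
#endif

#define PAGE_SIZE	(1UL << PAGE_SHIFT)	/* no longer always 4096 */
#define PAGE_MASK	(~(PAGE_SIZE - 1))

A configurable PAGE_SIZE is also why the assembly sources below stop hardcoding 4096.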
diff --git a/arch/sh/mm/cache-sh2.c b/arch/sh/mm/cache-sh2.c
index 2689cb24ea2b..6614033f6be9 100644
--- a/arch/sh/mm/cache-sh2.c
+++ b/arch/sh/mm/cache-sh2.c
@@ -5,6 +5,7 @@
  *
  * Released under the terms of the GNU GPL v2.0.
  */
+
 #include <linux/init.h>
 #include <linux/mm.h>
 
@@ -14,37 +15,43 @@
 #include <asm/cacheflush.h>
 #include <asm/io.h>
 
-/*
- * Calculate the OC address and set the way bit on the SH-2.
- *
- * We must have already jump_to_P2()'ed prior to calling this
- * function, since we rely on CCR manipulation to do the
- * Right Thing(tm).
- */
-unsigned long __get_oc_addr(unsigned long set, unsigned long way)
+void __flush_wback_region(void *start, int size)
 {
-	unsigned long ccr;
-
-	/*
-	 * On SH-2 the way bit isn't tracked in the address field
-	 * if we're doing address array access .. instead, we need
-	 * to manually switch out the way in the CCR.
-	 */
-	ccr = ctrl_inl(CCR);
-	ccr &= ~0x00c0;
-	ccr |= way << cpu_data->dcache.way_shift;
-
-	/*
-	 * Despite the number of sets being halved, we end up losing
-	 * the first 2 ways to OCRAM instead of the last 2 (if we're
-	 * 4-way). As a result, forcibly setting the W1 bit handily
-	 * bumps us up 2 ways.
-	 */
-	if (ccr & CCR_CACHE_ORA)
-		ccr |= 1 << (cpu_data->dcache.way_shift + 1);
-
-	ctrl_outl(ccr, CCR);
-
-	return CACHE_OC_ADDRESS_ARRAY | (set << cpu_data->dcache.entry_shift);
+	unsigned long v;
+	unsigned long begin, end;
+
+	begin = (unsigned long)start & ~(L1_CACHE_BYTES-1);
+	end = ((unsigned long)start + size + L1_CACHE_BYTES-1)
+		& ~(L1_CACHE_BYTES-1);
+	for (v = begin; v < end; v+=L1_CACHE_BYTES) {
+		/* FIXME cache purge */
+		ctrl_outl((v & 0x1ffffc00), (v & 0x00000ff0) | 0x00000008);
+	}
+}
+
+void __flush_purge_region(void *start, int size)
+{
+	unsigned long v;
+	unsigned long begin, end;
+
+	begin = (unsigned long)start & ~(L1_CACHE_BYTES-1);
+	end = ((unsigned long)start + size + L1_CACHE_BYTES-1)
+		& ~(L1_CACHE_BYTES-1);
+	for (v = begin; v < end; v+=L1_CACHE_BYTES) {
+		ctrl_outl((v & 0x1ffffc00), (v & 0x00000ff0) | 0x00000008);
+	}
+}
+
+void __flush_invalidate_region(void *start, int size)
+{
+	unsigned long v;
+	unsigned long begin, end;
+
+	begin = (unsigned long)start & ~(L1_CACHE_BYTES-1);
+	end = ((unsigned long)start + size + L1_CACHE_BYTES-1)
+		& ~(L1_CACHE_BYTES-1);
+	for (v = begin; v < end; v+=L1_CACHE_BYTES) {
+		ctrl_outl((v & 0x1ffffc00), (v & 0x00000ff0) | 0x00000008);
+	}
 }
 
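
All three new SH-2 flush routines share one idiom: round the start address down and the end address up to an L1_CACHE_BYTES boundary, so the loop writes to the cache address array once per line overlapping [start, start+size). A standalone demonstration of just that arithmetic, with the 16-byte SH-2 line size assumed:

/* Standalone demo of the cache-line rounding used above; L1_CACHE_BYTES
 * must be a power of two for the mask trick to work. */
#include <assert.h>
#include <stdio.h>

#define L1_CACHE_BYTES 16UL	/* assumed SH-2 D-cache line size */

int main(void)
{
	unsigned long start = 0x1003, size = 40;
	unsigned long begin = start & ~(L1_CACHE_BYTES - 1);
	unsigned long end = (start + size + L1_CACHE_BYTES - 1)
			    & ~(L1_CACHE_BYTES - 1);

	/* begin is rounded down and end up, so every byte of the
	 * region falls within a flushed line. */
	assert(begin <= start && start + size <= end);
	printf("flush lines from %#lx to %#lx\n", begin, end);
	return 0;
}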
diff --git a/arch/sh/mm/cache-sh4.c b/arch/sh/mm/cache-sh4.c
index e48cc22724d9..ae531affccbd 100644
--- a/arch/sh/mm/cache-sh4.c
+++ b/arch/sh/mm/cache-sh4.c
@@ -11,12 +11,8 @@
  */
 #include <linux/init.h>
 #include <linux/mm.h>
-#include <asm/addrspace.h>
-#include <asm/pgtable.h>
-#include <asm/processor.h>
-#include <asm/cache.h>
-#include <asm/io.h>
-#include <asm/pgalloc.h>
+#include <linux/io.h>
+#include <linux/mutex.h>
 #include <asm/mmu_context.h>
 #include <asm/cacheflush.h>
 
@@ -83,9 +79,9 @@ static void __init emit_cache_params(void)
  */
 
 /* Worst case assumed to be 64k cache, direct-mapped i.e. 4 synonym bits. */
-#define MAX_P3_SEMAPHORES 16
+#define MAX_P3_MUTEXES 16
 
-struct semaphore p3map_sem[MAX_P3_SEMAPHORES];
+struct mutex p3map_mutex[MAX_P3_MUTEXES];
 
 void __init p3_cache_init(void)
 {
@@ -115,7 +111,7 @@ void __init p3_cache_init(void)
 		panic("%s failed.", __FUNCTION__);
 
 	for (i = 0; i < cpu_data->dcache.n_aliases; i++)
-		sema_init(&p3map_sem[i], 1);
+		mutex_init(&p3map_mutex[i]);
 }
 
 /*
@@ -229,7 +225,7 @@ static inline void flush_cache_4096(unsigned long start,
 	 */
 	if ((cpu_data->flags & CPU_HAS_P2_FLUSH_BUG) ||
 	    (start < CACHE_OC_ADDRESS_ARRAY))
-	    	exec_offset = 0x20000000;
+		exec_offset = 0x20000000;
 
 	local_irq_save(flags);
 	__flush_cache_4096(start | SH_CACHE_ASSOC,
@@ -250,7 +246,7 @@ void flush_dcache_page(struct page *page)
 
 		/* Loop all the D-cache */
 		n = cpu_data->dcache.n_aliases;
-		for (i = 0; i < n; i++, addr += PAGE_SIZE)
+		for (i = 0; i < n; i++, addr += 4096)
 			flush_cache_4096(addr, phys);
 	}
 
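
The semaphore-to-mutex conversion above (mirrored in pg-sh4.c at the end of this diff) is mechanical: a binary semaphore used purely for mutual exclusion maps one-to-one onto the mutex API. A minimal sketch of the pattern, with the array size assumed rather than derived from cpu_data as the real code does:

#include <linux/mutex.h>

#define NR_P3_WINDOWS 16	/* assumed; the real code sizes this from the alias count */

static struct mutex p3map_mutex[NR_P3_WINDOWS];

static void p3map_init_sketch(void)
{
	int i;

	for (i = 0; i < NR_P3_WINDOWS; i++)
		mutex_init(&p3map_mutex[i]);	/* was: sema_init(&p3map_sem[i], 1); */
}

static void p3map_use_sketch(int idx)
{
	mutex_lock(&p3map_mutex[idx]);		/* was: down(&p3map_sem[idx]); */
	/* ... map and use the P3 colouring window ... */
	mutex_unlock(&p3map_mutex[idx]);	/* was: up(&p3map_sem[idx]); */
}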
diff --git a/arch/sh/mm/clear_page.S b/arch/sh/mm/clear_page.S
index 7b96425ae270..8a706131e521 100644
--- a/arch/sh/mm/clear_page.S
+++ b/arch/sh/mm/clear_page.S
@@ -1,12 +1,12 @@
-/* $Id: clear_page.S,v 1.13 2003/08/25 17:03:10 lethal Exp $
- *
+/*
  * __clear_user_page, __clear_user, clear_page implementation of SuperH
  *
  * Copyright (C) 2001 Kaz Kojima
  * Copyright (C) 2001, 2002 Niibe Yutaka
- *
+ * Copyright (C) 2006 Paul Mundt
  */
 #include <linux/linkage.h>
+#include <asm/page.h>
 
 /*
  * clear_page_slow
@@ -18,11 +18,11 @@
 /*
  * r0 --- scratch
  * r4 --- to
- * r5 --- to + 4096
+ * r5 --- to + PAGE_SIZE
  */
 ENTRY(clear_page_slow)
 	mov	r4,r5
-	mov.w	.Llimit,r0
+	mov.l	.Llimit,r0
 	add	r0,r5
 	mov	#0,r0
 	!
@@ -50,7 +50,7 @@ ENTRY(clear_page_slow)
 	!
 	rts
 	 nop
-.Llimit:	.word	(4096-28)
+.Llimit:	.long	(PAGE_SIZE-28)
 
 ENTRY(__clear_user)
 	!
@@ -164,10 +164,10 @@ ENTRY(__clear_user)
  * r0 --- scratch
  * r4 --- to
  * r5 --- orig_to
- * r6 --- to + 4096
+ * r6 --- to + PAGE_SIZE
  */
 ENTRY(__clear_user_page)
-	mov.w	.L4096,r0
+	mov.l	.Lpsz,r0
 	mov	r4,r6
 	add	r0,r6
 	mov	#0,r0
@@ -191,7 +191,7 @@ ENTRY(__clear_user_page)
 	!
 	rts
 	 nop
-.L4096:	.word	4096
+.Lpsz:	.long	PAGE_SIZE
 
 #endif
 
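
The .word to .long conversion above is not cosmetic. SH's mov.w loads a 16-bit PC-relative literal and sign-extends it to 32 bits, so once PAGE_SIZE can be 64kB, (PAGE_SIZE - 28) = 65508 = 0xffe4 would be loaded as -28. The same fix recurs in copy_page.S below. A small C illustration of the hazard (the sign-extension semantics of mov.w are the assumption here):

/* Demonstrates why a 16-bit sign-extending load cannot carry
 * PAGE_SIZE-derived constants once 64kB pages are possible. */
#include <stdio.h>

int main(void)
{
	long via_movw = (short)(65536 - 28);	/* what mov.w would yield */
	long via_movl = (long)(65536 - 28);	/* what mov.l yields */

	printf("mov.w view: %ld\n", via_movw);	/* prints -28 */
	printf("mov.l view: %ld\n", via_movl);	/* prints 65508 */
	return 0;
}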
diff --git a/arch/sh/mm/copy_page.S b/arch/sh/mm/copy_page.S
index 1addffe117c3..397c94c97315 100644
--- a/arch/sh/mm/copy_page.S
+++ b/arch/sh/mm/copy_page.S
@@ -1,12 +1,12 @@
-/* $Id: copy_page.S,v 1.8 2003/08/25 17:03:10 lethal Exp $
- *
+/*
  * copy_page, __copy_user_page, __copy_user implementation of SuperH
  *
  * Copyright (C) 2001 Niibe Yutaka & Kaz Kojima
 * Copyright (C) 2002 Toshinobu Sugioka
- *
+ * Copyright (C) 2006 Paul Mundt
  */
 #include <linux/linkage.h>
+#include <asm/page.h>
 
 /*
  * copy_page_slow
@@ -18,7 +18,7 @@
 
 /*
  * r0, r1, r2, r3, r4, r5, r6, r7 --- scratch
- * r8 --- from + 4096
+ * r8 --- from + PAGE_SIZE
  * r9 --- not used
  * r10 --- to
  * r11 --- from
@@ -30,7 +30,7 @@ ENTRY(copy_page_slow)
 	mov	r4,r10
 	mov	r5,r11
 	mov	r5,r8
-	mov.w	.L4096,r0
+	mov.l	.Lpsz,r0
 	add	r0,r8
 	!
 1:	mov.l	@r11+,r0
@@ -80,7 +80,7 @@ ENTRY(copy_page_slow)
 
 /*
  * r0, r1, r2, r3, r4, r5, r6, r7 --- scratch
- * r8 --- from + 4096
+ * r8 --- from + PAGE_SIZE
  * r9 --- orig_to
  * r10 --- to
  * r11 --- from
@@ -94,7 +94,7 @@ ENTRY(__copy_user_page)
 	mov	r5,r11
 	mov	r6,r9
 	mov	r5,r8
-	mov.w	.L4096,r0
+	mov.l	.Lpsz,r0
 	add	r0,r8
 	!
 1:	ocbi	@r9
@@ -129,7 +129,7 @@ ENTRY(__copy_user_page)
 	rts
 	 nop
 #endif
-.L4096:	.word	4096
+.Lpsz:	.long	PAGE_SIZE
 /*
  * __kernel_size_t __copy_user(void *to, const void *from, __kernel_size_t n);
  * Return the number of bytes NOT copied
diff --git a/arch/sh/mm/fault.c b/arch/sh/mm/fault.c
index 68663b8f99ae..716ebf568af2 100644
--- a/arch/sh/mm/fault.c
+++ b/arch/sh/mm/fault.c
@@ -26,13 +26,19 @@ extern void die(const char *,struct pt_regs *,long);
  * and the problem, and then passes it off to one of the appropriate
  * routines.
  */
-asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long writeaccess,
-			      unsigned long address)
+asmlinkage void __kprobes do_page_fault(struct pt_regs *regs,
+					unsigned long writeaccess,
+					unsigned long address)
 {
 	struct task_struct *tsk;
 	struct mm_struct *mm;
 	struct vm_area_struct * vma;
 	unsigned long page;
+	int si_code;
+	siginfo_t info;
+
+	trace_hardirqs_on();
+	local_irq_enable();
 
 #ifdef CONFIG_SH_KGDB
 	if (kgdb_nofault && kgdb_bus_err_hook)
@@ -41,6 +47,46 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long writeaccess,
 
 	tsk = current;
 	mm = tsk->mm;
+	si_code = SEGV_MAPERR;
+
+	if (unlikely(address >= TASK_SIZE)) {
+		/*
+		 * Synchronize this task's top level page-table
+		 * with the 'reference' page table.
+		 *
+		 * Do _not_ use "tsk" here. We might be inside
+		 * an interrupt in the middle of a task switch..
+		 */
+		int offset = pgd_index(address);
+		pgd_t *pgd, *pgd_k;
+		pud_t *pud, *pud_k;
+		pmd_t *pmd, *pmd_k;
+
+		pgd = get_TTB() + offset;
+		pgd_k = swapper_pg_dir + offset;
+
+		/* This will never happen with the folded page table. */
+		if (!pgd_present(*pgd)) {
+			if (!pgd_present(*pgd_k))
+				goto bad_area_nosemaphore;
+			set_pgd(pgd, *pgd_k);
+			return;
+		}
+
+		pud = pud_offset(pgd, address);
+		pud_k = pud_offset(pgd_k, address);
+		if (pud_present(*pud) || !pud_present(*pud_k))
+			goto bad_area_nosemaphore;
+		set_pud(pud, *pud_k);
+
+		pmd = pmd_offset(pud, address);
+		pmd_k = pmd_offset(pud_k, address);
+		if (pmd_present(*pmd) || !pmd_present(*pmd_k))
+			goto bad_area_nosemaphore;
+		set_pmd(pmd, *pmd_k);
+
+		return;
+	}
 
 	/*
 	 * If we're in an interrupt or have no user
@@ -65,6 +111,7 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long writeaccess,
 	 * we can handle it..
 	 */
 good_area:
+	si_code = SEGV_ACCERR;
 	if (writeaccess) {
 		if (!(vma->vm_flags & VM_WRITE))
 			goto bad_area;
@@ -104,10 +151,13 @@ survive:
 bad_area:
 	up_read(&mm->mmap_sem);
 
+bad_area_nosemaphore:
 	if (user_mode(regs)) {
-		tsk->thread.address = address;
-		tsk->thread.error_code = writeaccess;
-		force_sig(SIGSEGV, tsk);
+		info.si_signo = SIGSEGV;
+		info.si_errno = 0;
+		info.si_code = si_code;
+		info.si_addr = (void *) address;
+		force_sig_info(SIGSEGV, &info, tsk);
 		return;
 	}
 
@@ -127,11 +177,9 @@ no_context:
 	printk(KERN_ALERT "Unable to handle kernel paging request");
 	printk(" at virtual address %08lx\n", address);
 	printk(KERN_ALERT "pc = %08lx\n", regs->pc);
-	asm volatile("mov.l	%1, %0"
-		     : "=r" (page)
-		     : "m" (__m(MMU_TTB)));
+	page = (unsigned long)get_TTB();
 	if (page) {
-		page = ((unsigned long *) page)[address >> 22];
+		page = ((unsigned long *) page)[address >> PGDIR_SHIFT];
 		printk(KERN_ALERT "*pde = %08lx\n", page);
 		if (page & _PAGE_PRESENT) {
 			page &= PAGE_MASK;
@@ -166,98 +214,13 @@ do_sigbus:
 	 * Send a sigbus, regardless of whether we were in kernel
 	 * or user mode.
 	 */
-	tsk->thread.address = address;
-	tsk->thread.error_code = writeaccess;
-	tsk->thread.trap_no = 14;
-	force_sig(SIGBUS, tsk);
+	info.si_signo = SIGBUS;
+	info.si_errno = 0;
+	info.si_code = BUS_ADRERR;
+	info.si_addr = (void *)address;
+	force_sig_info(SIGBUS, &info, tsk);
 
 	/* Kernel mode? Handle exceptions or die */
 	if (!user_mode(regs))
 		goto no_context;
 }
-
-#ifdef CONFIG_SH_STORE_QUEUES
-/*
- * This is a special case for the SH-4 store queues, as pages for this
- * space still need to be faulted in before it's possible to flush the
- * store queue cache for writeout to the remapped region.
- */
-#define P3_ADDR_MAX (P4SEG_STORE_QUE + 0x04000000)
-#else
-#define P3_ADDR_MAX P4SEG
-#endif
-
-/*
- * Called with interrupts disabled.
- */
-asmlinkage int __kprobes __do_page_fault(struct pt_regs *regs,
-					 unsigned long writeaccess,
-					 unsigned long address)
-{
-	pgd_t *pgd;
-	pud_t *pud;
-	pmd_t *pmd;
-	pte_t *pte;
-	pte_t entry;
-	struct mm_struct *mm = current->mm;
-	spinlock_t *ptl;
-	int ret = 1;
-
-#ifdef CONFIG_SH_KGDB
-	if (kgdb_nofault && kgdb_bus_err_hook)
-		kgdb_bus_err_hook();
-#endif
-
-	/*
-	 * We don't take page faults for P1, P2, and parts of P4, these
-	 * are always mapped, whether it be due to legacy behaviour in
-	 * 29-bit mode, or due to PMB configuration in 32-bit mode.
-	 */
-	if (address >= P3SEG && address < P3_ADDR_MAX) {
-		pgd = pgd_offset_k(address);
-		mm = NULL;
-	} else {
-		if (unlikely(address >= TASK_SIZE || !mm))
-			return 1;
-
-		pgd = pgd_offset(mm, address);
-	}
-
-	pud = pud_offset(pgd, address);
-	if (pud_none_or_clear_bad(pud))
-		return 1;
-	pmd = pmd_offset(pud, address);
-	if (pmd_none_or_clear_bad(pmd))
-		return 1;
-
-	if (mm)
-		pte = pte_offset_map_lock(mm, pmd, address, &ptl);
-	else
-		pte = pte_offset_kernel(pmd, address);
-
-	entry = *pte;
-	if (unlikely(pte_none(entry) || pte_not_present(entry)))
-		goto unlock;
-	if (unlikely(writeaccess && !pte_write(entry)))
-		goto unlock;
-
-	if (writeaccess)
-		entry = pte_mkdirty(entry);
-	entry = pte_mkyoung(entry);
-
-#ifdef CONFIG_CPU_SH4
-	/*
-	 * ITLB is not affected by "ldtlb" instruction.
-	 * So, we need to flush the entry by ourselves.
-	 */
-	__flush_tlb_page(get_asid(), address & PAGE_MASK);
-#endif
-
-	set_pte(pte, entry);
-	update_mmu_cache(NULL, address, entry);
-	ret = 0;
-unlock:
-	if (mm)
-		pte_unmap_unlock(pte, ptl);
-	return ret;
-}
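
Besides the new vmalloc-area fault synchronization, the switch from force_sig() to force_sig_info() means user space now receives a populated siginfo: si_addr carries the faulting address, and si_code distinguishes SEGV_MAPERR (the default, set before the VMA lookup) from SEGV_ACCERR (set at good_area). A plain POSIX C demo of what a handler can now observe, runnable on any Linux target:

#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static void segv_handler(int sig, siginfo_t *info, void *ctx)
{
	(void)sig;
	(void)ctx;
	/* fprintf is not async-signal-safe; acceptable for a one-shot demo */
	fprintf(stderr, "SIGSEGV at %p, si_code=%d (%s)\n",
		info->si_addr, info->si_code,
		info->si_code == SEGV_MAPERR ? "SEGV_MAPERR" :
		info->si_code == SEGV_ACCERR ? "SEGV_ACCERR" : "other");
	_exit(1);
}

int main(void)
{
	struct sigaction sa;

	memset(&sa, 0, sizeof(sa));
	sa.sa_sigaction = segv_handler;
	sa.sa_flags = SA_SIGINFO;
	sigaction(SIGSEGV, &sa, NULL);

	*(volatile int *)0x10 = 42;	/* fault on an unmapped address */
	return 0;
}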
diff --git a/arch/sh/mm/init.c b/arch/sh/mm/init.c
index 7154d1ce9785..59f4cc18235b 100644
--- a/arch/sh/mm/init.c
+++ b/arch/sh/mm/init.c
@@ -84,30 +84,22 @@ static void set_pte_phys(unsigned long addr, unsigned long phys, pgprot_t prot)
 	pmd_t *pmd;
 	pte_t *pte;
 
-	pgd = swapper_pg_dir + pgd_index(addr);
+	pgd = pgd_offset_k(addr);
 	if (pgd_none(*pgd)) {
 		pgd_ERROR(*pgd);
 		return;
 	}
 
-	pud = pud_offset(pgd, addr);
-	if (pud_none(*pud)) {
-		pmd = (pmd_t *)get_zeroed_page(GFP_ATOMIC);
-		set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE | _PAGE_USER));
-		if (pmd != pmd_offset(pud, 0)) {
-			pud_ERROR(*pud);
-			return;
-		}
+	pud = pud_alloc(NULL, pgd, addr);
+	if (unlikely(!pud)) {
+		pud_ERROR(*pud);
+		return;
 	}
 
-	pmd = pmd_offset(pud, addr);
-	if (pmd_none(*pmd)) {
-		pte = (pte_t *)get_zeroed_page(GFP_ATOMIC);
-		set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE | _PAGE_USER));
-		if (pte != pte_offset_kernel(pmd, 0)) {
-			pmd_ERROR(*pmd);
-			return;
-		}
+	pmd = pmd_alloc(NULL, pud, addr);
+	if (unlikely(!pmd)) {
+		pmd_ERROR(*pmd);
+		return;
 	}
 
 	pte = pte_offset_kernel(pmd, addr);
@@ -155,9 +147,6 @@ extern char __init_begin, __init_end;
 
 /*
  * paging_init() sets up the page tables
- *
- * This routines also unmaps the page at virtual kernel address 0, so
- * that we can trap those pesky NULL-reference errors in the kernel.
  */
 void __init paging_init(void)
 {
@@ -180,14 +169,11 @@ void __init paging_init(void)
 	 */
 	{
 		unsigned long max_dma, low, start_pfn;
-		pgd_t *pg_dir;
-		int i;
-
-		/* We don't need kernel mapping as hardware support that. */
-		pg_dir = swapper_pg_dir;
 
-		for (i = 0; i < PTRS_PER_PGD; i++)
-			pgd_val(pg_dir[i]) = 0;
+		/* We don't need to map the kernel through the TLB, as
+		 * it is permanatly mapped using P1. So clear the
+		 * entire pgd. */
+		memset(swapper_pg_dir, 0, sizeof(swapper_pg_dir));
 
 		/* Turn on the MMU */
 		enable_mmu();
@@ -206,6 +192,10 @@ void __init paging_init(void)
 		}
 	}
 
+	/* Set an initial value for the MMU.TTB so we don't have to
+	 * check for a null value. */
+	set_TTB(swapper_pg_dir);
+
 #elif defined(CONFIG_CPU_SH3) || defined(CONFIG_CPU_SH4)
 	/*
 	 * If we don't have CONFIG_MMU set and the processor in question
@@ -227,7 +217,6 @@ static struct kcore_list kcore_mem, kcore_vmalloc;
 
 void __init mem_init(void)
 {
-	extern unsigned long empty_zero_page[1024];
 	int codesize, reservedpages, datasize, initsize;
 	int tmp;
 	extern unsigned long memory_start;
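
set_pte_phys() above now leans on the generic pud_alloc()/pmd_alloc() helpers instead of hand-rolling intermediate tables with get_zeroed_page(). A condensed sketch of the walk-and-allocate pattern it relies on; this mirrors the hunk above (including its NULL mm argument) rather than any particular tree, and it is kernel-context code, not a standalone program:

static pte_t *walk_alloc_sketch(unsigned long addr)
{
	pgd_t *pgd = pgd_offset_k(addr);	/* kernel page tables */
	pud_t *pud;
	pmd_t *pmd;

	pud = pud_alloc(NULL, pgd, addr);	/* allocates the pud if absent */
	if (unlikely(!pud))
		return NULL;

	pmd = pmd_alloc(NULL, pud, addr);	/* allocates the pmd if absent */
	if (unlikely(!pmd))
		return NULL;

	return pte_offset_kernel(pmd, addr);	/* final-level entry */
}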
diff --git a/arch/sh/mm/ioremap.c b/arch/sh/mm/ioremap.c
index a9fe80cfc233..11d54c149821 100644
--- a/arch/sh/mm/ioremap.c
+++ b/arch/sh/mm/ioremap.c
@@ -28,9 +28,7 @@ static inline void remap_area_pte(pte_t * pte, unsigned long address,
 {
 	unsigned long end;
 	unsigned long pfn;
-	pgprot_t pgprot = __pgprot(_PAGE_PRESENT | _PAGE_RW |
-				   _PAGE_DIRTY | _PAGE_ACCESSED |
-				   _PAGE_HW_SHARED | _PAGE_FLAGS_HARD | flags);
+	pgprot_t pgprot = __pgprot(pgprot_val(PAGE_KERNEL_NOCACHE) | flags);
 
 	address &= ~PMD_MASK;
 	end = address + size;
diff --git a/arch/sh/mm/pg-dma.c b/arch/sh/mm/pg-dma.c
index 1406d2e348ca..bb23679369d6 100644
--- a/arch/sh/mm/pg-dma.c
+++ b/arch/sh/mm/pg-dma.c
@@ -39,8 +39,6 @@ static void copy_page_dma(void *to, void *from)
 
 static void clear_page_dma(void *to)
 {
-	extern unsigned long empty_zero_page[1024];
-
 	/*
 	 * We get invoked quite early on, if the DMAC hasn't been initialized
 	 * yet, fall back on the slow manual implementation.
diff --git a/arch/sh/mm/pg-sh4.c b/arch/sh/mm/pg-sh4.c
index 07371ed7a313..3f98d2a4f936 100644
--- a/arch/sh/mm/pg-sh4.c
+++ b/arch/sh/mm/pg-sh4.c
@@ -6,22 +6,12 @@
  *
  * Released under the terms of the GNU GPL v2.0.
  */
-#include <linux/init.h>
-#include <linux/mman.h>
 #include <linux/mm.h>
-#include <linux/threads.h>
-#include <asm/addrspace.h>
-#include <asm/page.h>
-#include <asm/pgtable.h>
-#include <asm/processor.h>
-#include <asm/cache.h>
-#include <asm/io.h>
-#include <asm/uaccess.h>
-#include <asm/pgalloc.h>
+#include <linux/mutex.h>
 #include <asm/mmu_context.h>
 #include <asm/cacheflush.h>
 
-extern struct semaphore p3map_sem[];
+extern struct mutex p3map_mutex[];
 
 #define CACHE_ALIAS (cpu_data->dcache.alias_mask)
 
@@ -37,10 +27,6 @@ void clear_user_page(void *to, unsigned long address, struct page *page)
 	if (((address ^ (unsigned long)to) & CACHE_ALIAS) == 0)
 		clear_page(to);
 	else {
-		pgprot_t pgprot = __pgprot(_PAGE_PRESENT |
-					   _PAGE_RW | _PAGE_CACHABLE |
-					   _PAGE_DIRTY | _PAGE_ACCESSED |
-					   _PAGE_HW_SHARED | _PAGE_FLAGS_HARD);
 		unsigned long phys_addr = PHYSADDR(to);
 		unsigned long p3_addr = P3SEG + (address & CACHE_ALIAS);
 		pgd_t *pgd = pgd_offset_k(p3_addr);
@@ -50,8 +36,8 @@ void clear_user_page(void *to, unsigned long address, struct page *page)
 		pte_t entry;
 		unsigned long flags;
 
-		entry = pfn_pte(phys_addr >> PAGE_SHIFT, pgprot);
-		down(&p3map_sem[(address & CACHE_ALIAS)>>12]);
+		entry = pfn_pte(phys_addr >> PAGE_SHIFT, PAGE_KERNEL);
+		mutex_lock(&p3map_mutex[(address & CACHE_ALIAS)>>12]);
 		set_pte(pte, entry);
 		local_irq_save(flags);
 		__flush_tlb_page(get_asid(), p3_addr);
@@ -59,7 +45,7 @@ void clear_user_page(void *to, unsigned long address, struct page *page)
 		update_mmu_cache(NULL, p3_addr, entry);
 		__clear_user_page((void *)p3_addr, to);
 		pte_clear(&init_mm, p3_addr, pte);
-		up(&p3map_sem[(address & CACHE_ALIAS)>>12]);
+		mutex_unlock(&p3map_mutex[(address & CACHE_ALIAS)>>12]);
 	}
 }
 
@@ -77,10 +63,6 @@ void copy_user_page(void *to, void *from, unsigned long address,
 	if (((address ^ (unsigned long)to) & CACHE_ALIAS) == 0)
 		copy_page(to, from);
 	else {
-		pgprot_t pgprot = __pgprot(_PAGE_PRESENT |
-					   _PAGE_RW | _PAGE_CACHABLE |
-					   _PAGE_DIRTY | _PAGE_ACCESSED |
-					   _PAGE_HW_SHARED | _PAGE_FLAGS_HARD);
 		unsigned long phys_addr = PHYSADDR(to);
 		unsigned long p3_addr = P3SEG + (address & CACHE_ALIAS);
 		pgd_t *pgd = pgd_offset_k(p3_addr);
@@ -90,8 +72,8 @@ void copy_user_page(void *to, void *from, unsigned long address,
 		pte_t entry;
 		unsigned long flags;
 
-		entry = pfn_pte(phys_addr >> PAGE_SHIFT, pgprot);
-		down(&p3map_sem[(address & CACHE_ALIAS)>>12]);
+		entry = pfn_pte(phys_addr >> PAGE_SHIFT, PAGE_KERNEL);
+		mutex_lock(&p3map_mutex[(address & CACHE_ALIAS)>>12]);
 		set_pte(pte, entry);
 		local_irq_save(flags);
 		__flush_tlb_page(get_asid(), p3_addr);
@@ -99,7 +81,7 @@ void copy_user_page(void *to, void *from, unsigned long address,
 		update_mmu_cache(NULL, p3_addr, entry);
 		__copy_user_page((void *)p3_addr, from, to);
 		pte_clear(&init_mm, p3_addr, pte);
-		up(&p3map_sem[(address & CACHE_ALIAS)>>12]);
+		mutex_unlock(&p3map_mutex[(address & CACHE_ALIAS)>>12]);
 	}
 }
 
@@ -122,4 +104,3 @@ inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t
 	}
 	return pte;
 }
-