author	Paul Mundt <lethal@linux-sh.org>	2006-09-27 02:13:36 -0400
committer	Paul Mundt <lethal@linux-sh.org>	2006-09-27 02:13:36 -0400
commit	26ff6c11ef38e08990c1e417c299246e6ab18ff7 (patch)
tree	ebd37fd0270b7c7dfe8474a046663db78fcdb1ab /arch/sh
parent	9359e757709a211040e4b0151eae69248e7c6eca (diff)
sh: page table alloc cleanups and page fault optimizations.
Cleanup of the page table allocators, using the generic folded PMD and PUD helpers. TLB flushing operations are moved to a more sensible spot (a new arch/sh/mm/tlb-flush.c).

The page fault handler is also optimized slightly: we no longer waste cycles on IRQ disabling for the ITLB flush of the faulting page, since we are already under CLI protection by the initial exception handler.

Signed-off-by: Paul Mundt <lethal@linux-sh.org>
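For illustration (not part of the patch): with the generic folded PUD from <asm-generic/pgtable-nopud.h>, the pud_offset() step this patch adds reduces to a pointer recast, so the three-level walk costs nothing extra on a two-level SH configuration. A minimal sketch, assuming those generic helpers; walk_page_table() is a hypothetical name, not something the patch introduces:

	/* Walk the page tables down to the PTE for addr, or NULL if unmapped. */
	static pte_t *walk_page_table(struct mm_struct *mm, unsigned long addr)
	{
		pgd_t *pgd = pgd_offset(mm, addr);
		pud_t *pud;
		pmd_t *pmd;

		if (pgd_none_or_clear_bad(pgd))
			return NULL;
		pud = pud_offset(pgd, addr);	/* folded: effectively (pud_t *)pgd */
		if (pud_none_or_clear_bad(pud))
			return NULL;
		pmd = pmd_offset(pud, addr);
		if (pmd_none_or_clear_bad(pmd))
			return NULL;
		return pte_offset_kernel(pmd, addr);	/* kernel PTEs only */
	}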
Diffstat (limited to 'arch/sh')
-rw-r--r--	arch/sh/kernel/sys_sh.c	2
-rw-r--r--	arch/sh/mm/Makefile	2
-rw-r--r--	arch/sh/mm/consistent.c	2
-rw-r--r--	arch/sh/mm/fault.c	202
-rw-r--r--	arch/sh/mm/init.c	13
-rw-r--r--	arch/sh/mm/tlb-flush.c	132
6 files changed, 188 insertions(+), 165 deletions(-)
diff --git a/arch/sh/kernel/sys_sh.c b/arch/sh/kernel/sys_sh.c
index 917b2f32f260..d8bcd8a22327 100644
--- a/arch/sh/kernel/sys_sh.c
+++ b/arch/sh/kernel/sys_sh.c
@@ -21,7 +21,7 @@
 #include <linux/mman.h>
 #include <linux/file.h>
 #include <linux/utsname.h>
-
+#include <asm/cacheflush.h>
 #include <asm/uaccess.h>
 #include <asm/ipc.h>
 
diff --git a/arch/sh/mm/Makefile b/arch/sh/mm/Makefile
index f4e32b3d24dc..d90906367c5f 100644
--- a/arch/sh/mm/Makefile
+++ b/arch/sh/mm/Makefile
@@ -12,7 +12,7 @@ obj-$(CONFIG_DMA_PAGE_OPS) += pg-dma.o
 obj-$(CONFIG_HUGETLB_PAGE)	+= hugetlbpage.o
 
 mmu-y			:= fault-nommu.o tlb-nommu.o pg-nommu.o
-mmu-$(CONFIG_MMU)	:= fault.o clear_page.o copy_page.o
+mmu-$(CONFIG_MMU)	:= fault.o clear_page.o copy_page.o tlb-flush.o
 
 obj-y			+= $(mmu-y)
 
diff --git a/arch/sh/mm/consistent.c b/arch/sh/mm/consistent.c
index ee73e30263af..c81e6b67ad30 100644
--- a/arch/sh/mm/consistent.c
+++ b/arch/sh/mm/consistent.c
@@ -9,6 +9,8 @@
  */
 #include <linux/mm.h>
 #include <linux/dma-mapping.h>
+#include <asm/cacheflush.h>
+#include <asm/addrspace.h>
 #include <asm/io.h>
 
 void *consistent_alloc(gfp_t gfp, size_t size, dma_addr_t *handle)
diff --git a/arch/sh/mm/fault.c b/arch/sh/mm/fault.c
index 364181f27b79..7a03ffe6dadd 100644
--- a/arch/sh/mm/fault.c
+++ b/arch/sh/mm/fault.c
@@ -1,33 +1,20 @@
-/* $Id: fault.c,v 1.14 2004/01/13 05:52:11 kkojima Exp $
+/*
+ * Page fault handler for SH with an MMU.
  *
- * linux/arch/sh/mm/fault.c
  * Copyright (C) 1999 Niibe Yutaka
  * Copyright (C) 2003 Paul Mundt
  *
  * Based on linux/arch/i386/mm/fault.c:
  * Copyright (C) 1995 Linus Torvalds
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
  */
-
-#include <linux/signal.h>
-#include <linux/sched.h>
 #include <linux/kernel.h>
-#include <linux/errno.h>
-#include <linux/string.h>
-#include <linux/types.h>
-#include <linux/ptrace.h>
-#include <linux/mman.h>
 #include <linux/mm.h>
-#include <linux/smp.h>
-#include <linux/smp_lock.h>
-#include <linux/interrupt.h>
-#include <linux/module.h>
-
 #include <asm/system.h>
-#include <asm/io.h>
-#include <asm/uaccess.h>
-#include <asm/pgalloc.h>
 #include <asm/mmu_context.h>
-#include <asm/cacheflush.h>
 #include <asm/kgdb.h>
 
 extern void die(const char *,struct pt_regs *,long);
@@ -187,14 +174,25 @@ do_sigbus:
 	goto no_context;
 }
 
+#ifdef CONFIG_SH_STORE_QUEUES
 /*
- * Called with interrupt disabled.
+ * This is a special case for the SH-4 store queues, as pages for this
+ * space still need to be faulted in before it's possible to flush the
+ * store queue cache for writeout to the remapped region.
+ */
+#define P3_ADDR_MAX		(P4SEG_STORE_QUE + 0x04000000)
+#else
+#define P3_ADDR_MAX		P4SEG
+#endif
+
+/*
+ * Called with interrupts disabled.
  */
 asmlinkage int __do_page_fault(struct pt_regs *regs, unsigned long writeaccess,
 			       unsigned long address)
 {
-	unsigned long addrmax = P4SEG;
 	pgd_t *pgd;
+	pud_t *pud;
 	pmd_t *pmd;
 	pte_t *pte;
 	pte_t entry;
@@ -207,31 +205,36 @@ asmlinkage int __do_page_fault(struct pt_regs *regs, unsigned long writeaccess,
 	kgdb_bus_err_hook();
 #endif
 
-#ifdef CONFIG_SH_STORE_QUEUES
-	addrmax = P4SEG_STORE_QUE + 0x04000000;
-#endif
-
-	if (address >= P3SEG && address < addrmax) {
+	/*
+	 * We don't take page faults for P1, P2, and parts of P4, these
+	 * are always mapped, whether it be due to legacy behaviour in
+	 * 29-bit mode, or due to PMB configuration in 32-bit mode.
+	 */
+	if (address >= P3SEG && address < P3_ADDR_MAX)
 		pgd = pgd_offset_k(address);
-		mm = NULL;
-	} else if (address >= TASK_SIZE)
-		return 1;
-	else if (!(mm = current->mm))
-		return 1;
-	else
-		pgd = pgd_offset(mm, address);
+	else {
+		if (unlikely(address >= TASK_SIZE || !current->mm))
+			return 1;
+
+		pgd = pgd_offset(current->mm, address);
+	}
 
-	pmd = pmd_offset(pgd, address);
+	pud = pud_offset(pgd, address);
+	if (pud_none_or_clear_bad(pud))
+		return 1;
+	pmd = pmd_offset(pud, address);
 	if (pmd_none_or_clear_bad(pmd))
 		return 1;
+
 	if (mm)
 		pte = pte_offset_map_lock(mm, pmd, address, &ptl);
 	else
 		pte = pte_offset_kernel(pmd, address);
 
 	entry = *pte;
-	if (pte_none(entry) || pte_not_present(entry)
-			|| (writeaccess && !pte_write(entry)))
+	if (unlikely(pte_none(entry) || pte_not_present(entry)))
+		goto unlock;
+	if (unlikely(writeaccess && !pte_write(entry)))
 		goto unlock;
 
 	if (writeaccess)
@@ -243,13 +246,7 @@ asmlinkage int __do_page_fault(struct pt_regs *regs, unsigned long writeaccess,
 	 * ITLB is not affected by "ldtlb" instruction.
 	 * So, we need to flush the entry by ourselves.
 	 */
-
-	{
-		unsigned long flags;
-		local_irq_save(flags);
-		__flush_tlb_page(get_asid(), address&PAGE_MASK);
-		local_irq_restore(flags);
-	}
+	__flush_tlb_page(get_asid(), address & PAGE_MASK);
 #endif
 
 	set_pte(pte, entry);
@@ -260,122 +257,3 @@ unlock:
 	pte_unmap_unlock(pte, ptl);
 	return ret;
 }
-
-void flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
-{
-	if (vma->vm_mm && vma->vm_mm->context != NO_CONTEXT) {
-		unsigned long flags;
-		unsigned long asid;
-		unsigned long saved_asid = MMU_NO_ASID;
-
-		asid = vma->vm_mm->context & MMU_CONTEXT_ASID_MASK;
-		page &= PAGE_MASK;
-
-		local_irq_save(flags);
-		if (vma->vm_mm != current->mm) {
-			saved_asid = get_asid();
-			set_asid(asid);
-		}
-		__flush_tlb_page(asid, page);
-		if (saved_asid != MMU_NO_ASID)
-			set_asid(saved_asid);
-		local_irq_restore(flags);
-	}
-}
-
-void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
-		     unsigned long end)
-{
-	struct mm_struct *mm = vma->vm_mm;
-
-	if (mm->context != NO_CONTEXT) {
-		unsigned long flags;
-		int size;
-
-		local_irq_save(flags);
-		size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
-		if (size > (MMU_NTLB_ENTRIES/4)) { /* Too many TLB to flush */
-			mm->context = NO_CONTEXT;
-			if (mm == current->mm)
-				activate_context(mm);
-		} else {
-			unsigned long asid = mm->context&MMU_CONTEXT_ASID_MASK;
-			unsigned long saved_asid = MMU_NO_ASID;
-
-			start &= PAGE_MASK;
-			end += (PAGE_SIZE - 1);
-			end &= PAGE_MASK;
-			if (mm != current->mm) {
-				saved_asid = get_asid();
-				set_asid(asid);
-			}
-			while (start < end) {
-				__flush_tlb_page(asid, start);
-				start += PAGE_SIZE;
-			}
-			if (saved_asid != MMU_NO_ASID)
-				set_asid(saved_asid);
-		}
-		local_irq_restore(flags);
-	}
-}
-
-void flush_tlb_kernel_range(unsigned long start, unsigned long end)
-{
-	unsigned long flags;
-	int size;
-
-	local_irq_save(flags);
-	size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
-	if (size > (MMU_NTLB_ENTRIES/4)) { /* Too many TLB to flush */
-		flush_tlb_all();
-	} else {
-		unsigned long asid = init_mm.context&MMU_CONTEXT_ASID_MASK;
-		unsigned long saved_asid = get_asid();
-
-		start &= PAGE_MASK;
-		end += (PAGE_SIZE - 1);
-		end &= PAGE_MASK;
-		set_asid(asid);
-		while (start < end) {
-			__flush_tlb_page(asid, start);
-			start += PAGE_SIZE;
-		}
-		set_asid(saved_asid);
-	}
-	local_irq_restore(flags);
-}
-
-void flush_tlb_mm(struct mm_struct *mm)
-{
-	/* Invalidate all TLB of this process. */
-	/* Instead of invalidating each TLB, we get new MMU context. */
-	if (mm->context != NO_CONTEXT) {
-		unsigned long flags;
-
-		local_irq_save(flags);
-		mm->context = NO_CONTEXT;
-		if (mm == current->mm)
-			activate_context(mm);
-		local_irq_restore(flags);
-	}
-}
-
-void flush_tlb_all(void)
-{
-	unsigned long flags, status;
-
-	/*
-	 * Flush all the TLB.
-	 *
-	 * Write to the MMU control register's bit:
-	 *	TF-bit for SH-3, TI-bit for SH-4.
-	 *	It's same position, bit #2.
-	 */
-	local_irq_save(flags);
-	status = ctrl_inl(MMUCR);
-	status |= 0x04;
-	ctrl_outl(status, MMUCR);
-	ctrl_barrier();
-	local_irq_restore(flags);
-}
diff --git a/arch/sh/mm/init.c b/arch/sh/mm/init.c
index 8ea27ca4b700..d1a979eab656 100644
--- a/arch/sh/mm/init.c
+++ b/arch/sh/mm/init.c
@@ -80,6 +80,7 @@ void show_mem(void)
 static void set_pte_phys(unsigned long addr, unsigned long phys, pgprot_t prot)
 {
 	pgd_t *pgd;
+	pud_t *pud;
 	pmd_t *pmd;
 	pte_t *pte;
 
@@ -89,7 +90,17 @@ static void set_pte_phys(unsigned long addr, unsigned long phys, pgprot_t prot)
 		return;
 	}
 
-	pmd = pmd_offset(pgd, addr);
+	pud = pud_offset(pgd, addr);
+	if (pud_none(*pud)) {
+		pmd = (pmd_t *)get_zeroed_page(GFP_ATOMIC);
+		set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE | _PAGE_USER));
+		if (pmd != pmd_offset(pud, 0)) {
+			pud_ERROR(*pud);
+			return;
+		}
+	}
+
+	pmd = pmd_offset(pud, addr);
 	if (pmd_none(*pmd)) {
 		pte = (pte_t *)get_zeroed_page(GFP_ATOMIC);
 		set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE | _PAGE_USER));
diff --git a/arch/sh/mm/tlb-flush.c b/arch/sh/mm/tlb-flush.c
new file mode 100644
index 000000000000..fd7e42bcaa40
--- /dev/null
+++ b/arch/sh/mm/tlb-flush.c
@@ -0,0 +1,132 @@
+/*
+ * TLB flushing operations for SH with an MMU.
+ *
+ * Copyright (C) 1999 Niibe Yutaka
+ * Copyright (C) 2003 Paul Mundt
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/mm.h>
+#include <asm/mmu_context.h>
+#include <asm/tlbflush.h>
+
+void flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
+{
+	if (vma->vm_mm && vma->vm_mm->context != NO_CONTEXT) {
+		unsigned long flags;
+		unsigned long asid;
+		unsigned long saved_asid = MMU_NO_ASID;
+
+		asid = vma->vm_mm->context & MMU_CONTEXT_ASID_MASK;
+		page &= PAGE_MASK;
+
+		local_irq_save(flags);
+		if (vma->vm_mm != current->mm) {
+			saved_asid = get_asid();
+			set_asid(asid);
+		}
+		__flush_tlb_page(asid, page);
+		if (saved_asid != MMU_NO_ASID)
+			set_asid(saved_asid);
+		local_irq_restore(flags);
+	}
+}
+
+void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
+		     unsigned long end)
+{
+	struct mm_struct *mm = vma->vm_mm;
+
+	if (mm->context != NO_CONTEXT) {
+		unsigned long flags;
+		int size;
+
+		local_irq_save(flags);
+		size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
+		if (size > (MMU_NTLB_ENTRIES/4)) { /* Too many TLB to flush */
+			mm->context = NO_CONTEXT;
+			if (mm == current->mm)
+				activate_context(mm);
+		} else {
+			unsigned long asid = mm->context&MMU_CONTEXT_ASID_MASK;
+			unsigned long saved_asid = MMU_NO_ASID;
+
+			start &= PAGE_MASK;
+			end += (PAGE_SIZE - 1);
+			end &= PAGE_MASK;
+			if (mm != current->mm) {
+				saved_asid = get_asid();
+				set_asid(asid);
+			}
+			while (start < end) {
+				__flush_tlb_page(asid, start);
+				start += PAGE_SIZE;
+			}
+			if (saved_asid != MMU_NO_ASID)
+				set_asid(saved_asid);
+		}
+		local_irq_restore(flags);
+	}
+}
+
+void flush_tlb_kernel_range(unsigned long start, unsigned long end)
+{
+	unsigned long flags;
+	int size;
+
+	local_irq_save(flags);
+	size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
+	if (size > (MMU_NTLB_ENTRIES/4)) { /* Too many TLB to flush */
+		flush_tlb_all();
+	} else {
+		unsigned long asid = init_mm.context&MMU_CONTEXT_ASID_MASK;
+		unsigned long saved_asid = get_asid();
+
+		start &= PAGE_MASK;
+		end += (PAGE_SIZE - 1);
+		end &= PAGE_MASK;
+		set_asid(asid);
+		while (start < end) {
+			__flush_tlb_page(asid, start);
+			start += PAGE_SIZE;
+		}
+		set_asid(saved_asid);
+	}
+	local_irq_restore(flags);
+}
+
+void flush_tlb_mm(struct mm_struct *mm)
+{
+	/* Invalidate all TLB of this process. */
+	/* Instead of invalidating each TLB, we get new MMU context. */
+	if (mm->context != NO_CONTEXT) {
+		unsigned long flags;
+
+		local_irq_save(flags);
+		mm->context = NO_CONTEXT;
+		if (mm == current->mm)
+			activate_context(mm);
+		local_irq_restore(flags);
+	}
+}
+
+void flush_tlb_all(void)
+{
+	unsigned long flags, status;
+
+	/*
+	 * Flush all the TLB.
+	 *
+	 * Write to the MMU control register's bit:
+	 *	TF-bit for SH-3, TI-bit for SH-4.
+	 *	It's same position, bit #2.
+	 */
+	local_irq_save(flags);
+	status = ctrl_inl(MMUCR);
+	status |= 0x04;
+	ctrl_outl(status, MMUCR);
+	ctrl_barrier();
+	local_irq_restore(flags);
+}
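For illustration (not part of the patch): flush_tlb_range() and flush_tlb_kernel_range() above share a cutoff heuristic. Each __flush_tlb_page() costs a TLB probe, so once a range covers more than a quarter of the TLB (MMU_NTLB_ENTRIES/4 pages) it is cheaper to invalidate wholesale, by dropping the ASID context or, for kernel ranges, via flush_tlb_all(). A minimal sketch of that cutoff; want_full_flush() is a hypothetical helper, not part of the patch:

	/* True when per-page flushing would touch too much of the TLB. */
	static inline int want_full_flush(unsigned long start, unsigned long end)
	{
		unsigned long pages = (end - start + PAGE_SIZE - 1) >> PAGE_SHIFT;

		return pages > MMU_NTLB_ENTRIES / 4;
	}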