commit    26ff6c11ef38e08990c1e417c299246e6ab18ff7
author    Paul Mundt <lethal@linux-sh.org>  2006-09-27 02:13:36 -0400
committer Paul Mundt <lethal@linux-sh.org>  2006-09-27 02:13:36 -0400
tree      ebd37fd0270b7c7dfe8474a046663db78fcdb1ab
parent    9359e757709a211040e4b0151eae69248e7c6eca
sh: page table alloc cleanups and page fault optimizations.

Cleanup of page table allocators, using generic folded PMD and PUD
helpers. TLB flushing operations are moved to a more sensible spot.

The page fault handler is also optimized slightly; we no longer waste
cycles on IRQ disabling for flushing of the page from the ITLB, since
we're already under CLI protection by the initial exception handler.

Signed-off-by: Paul Mundt <lethal@linux-sh.org>
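For context on the "folded" helpers mentioned above: on a platform like SH with no real PUD level, the generic include/asm-generic/pgtable-nopud.h makes the extra page table level free. The following is a simplified sketch of the folding idea, modeled on that header's pattern rather than copied verbatim (the pgd_t stand-in here is hypothetical):

	/*
	 * Simplified sketch of a folded PUD level, after the pattern of
	 * include/asm-generic/pgtable-nopud.h; types and details trimmed.
	 */
	typedef struct { unsigned long pgd; } pgd_t;	/* illustrative stand-in */
	typedef struct { pgd_t pgd; } pud_t;		/* a PUD is a re-typed PGD */

	/* The "walk" to the PUD level is just a cast; no runtime cost. */
	static inline pud_t *pud_offset(pgd_t *pgd, unsigned long address)
	{
		return (pud_t *)pgd;
	}

	/* A folded level can never be absent or bad, so the checks vanish. */
	static inline int pud_none(pud_t pud) { return 0; }
	static inline int pud_bad(pud_t pud)  { return 0; }

Because pud_none() and pud_bad() are constant zero, the pud_none_or_clear_bad() check added to the fault path below compiles away entirely on two-level configurations.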
Diffstat (limited to 'arch/sh/mm/fault.c')
-rw-r--r--  arch/sh/mm/fault.c | 202
1 file changed, 40 insertions(+), 162 deletions(-)
diff --git a/arch/sh/mm/fault.c b/arch/sh/mm/fault.c
index 364181f27b7..7a03ffe6dad 100644
--- a/arch/sh/mm/fault.c
+++ b/arch/sh/mm/fault.c
@@ -1,33 +1,20 @@
-/* $Id: fault.c,v 1.14 2004/01/13 05:52:11 kkojima Exp $
+/*
+ * Page fault handler for SH with an MMU.
  *
- * linux/arch/sh/mm/fault.c
  * Copyright (C) 1999  Niibe Yutaka
  * Copyright (C) 2003  Paul Mundt
  *
  * Based on linux/arch/i386/mm/fault.c:
  *  Copyright (C) 1995  Linus Torvalds
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
  */
-
-#include <linux/signal.h>
-#include <linux/sched.h>
 #include <linux/kernel.h>
-#include <linux/errno.h>
-#include <linux/string.h>
-#include <linux/types.h>
-#include <linux/ptrace.h>
-#include <linux/mman.h>
 #include <linux/mm.h>
-#include <linux/smp.h>
-#include <linux/smp_lock.h>
-#include <linux/interrupt.h>
-#include <linux/module.h>
-
 #include <asm/system.h>
-#include <asm/io.h>
-#include <asm/uaccess.h>
-#include <asm/pgalloc.h>
 #include <asm/mmu_context.h>
-#include <asm/cacheflush.h>
 #include <asm/kgdb.h>
 
 extern void die(const char *,struct pt_regs *,long);
@@ -187,14 +174,25 @@ do_sigbus:
 	goto no_context;
 }
 
+#ifdef CONFIG_SH_STORE_QUEUES
 /*
- * Called with interrupt disabled.
+ * This is a special case for the SH-4 store queues, as pages for this
+ * space still need to be faulted in before it's possible to flush the
+ * store queue cache for writeout to the remapped region.
+ */
+#define P3_ADDR_MAX	(P4SEG_STORE_QUE + 0x04000000)
+#else
+#define P3_ADDR_MAX	P4SEG
+#endif
+
+/*
+ * Called with interrupts disabled.
  */
 asmlinkage int __do_page_fault(struct pt_regs *regs, unsigned long writeaccess,
 			       unsigned long address)
 {
-	unsigned long addrmax = P4SEG;
 	pgd_t *pgd;
+	pud_t *pud;
 	pmd_t *pmd;
 	pte_t *pte;
 	pte_t entry;
@@ -207,31 +205,36 @@ asmlinkage int __do_page_fault(struct pt_regs *regs, unsigned long writeaccess,
 	kgdb_bus_err_hook();
 #endif
 
-#ifdef CONFIG_SH_STORE_QUEUES
-	addrmax = P4SEG_STORE_QUE + 0x04000000;
-#endif
-
-	if (address >= P3SEG && address < addrmax) {
+	/*
+	 * We don't take page faults for P1, P2, and parts of P4, these
+	 * are always mapped, whether it be due to legacy behaviour in
+	 * 29-bit mode, or due to PMB configuration in 32-bit mode.
+	 */
+	if (address >= P3SEG && address < P3_ADDR_MAX)
 		pgd = pgd_offset_k(address);
-		mm = NULL;
-	} else if (address >= TASK_SIZE)
-		return 1;
-	else if (!(mm = current->mm))
-		return 1;
-	else
-		pgd = pgd_offset(mm, address);
+	else {
+		if (unlikely(address >= TASK_SIZE || !current->mm))
+			return 1;
+
+		pgd = pgd_offset(current->mm, address);
+	}
 
-	pmd = pmd_offset(pgd, address);
+	pud = pud_offset(pgd, address);
+	if (pud_none_or_clear_bad(pud))
+		return 1;
+	pmd = pmd_offset(pud, address);
 	if (pmd_none_or_clear_bad(pmd))
 		return 1;
+
 	if (mm)
 		pte = pte_offset_map_lock(mm, pmd, address, &ptl);
 	else
 		pte = pte_offset_kernel(pmd, address);
 
 	entry = *pte;
-	if (pte_none(entry) || pte_not_present(entry)
-	    || (writeaccess && !pte_write(entry)))
+	if (unlikely(pte_none(entry) || pte_not_present(entry)))
+		goto unlock;
+	if (unlikely(writeaccess && !pte_write(entry)))
 		goto unlock;
 
 	if (writeaccess)
@@ -243,13 +246,7 @@ asmlinkage int __do_page_fault(struct pt_regs *regs, unsigned long writeaccess,
 	 * ITLB is not affected by "ldtlb" instruction.
 	 * So, we need to flush the entry by ourselves.
 	 */
-
-	{
-		unsigned long flags;
-		local_irq_save(flags);
-		__flush_tlb_page(get_asid(), address&PAGE_MASK);
-		local_irq_restore(flags);
-	}
+	__flush_tlb_page(get_asid(), address & PAGE_MASK);
 #endif
 
 	set_pte(pte, entry);
@@ -260,122 +257,3 @@ unlock:
 	pte_unmap_unlock(pte, ptl);
 	return ret;
 }
-
-void flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
-{
-	if (vma->vm_mm && vma->vm_mm->context != NO_CONTEXT) {
-		unsigned long flags;
-		unsigned long asid;
-		unsigned long saved_asid = MMU_NO_ASID;
-
-		asid = vma->vm_mm->context & MMU_CONTEXT_ASID_MASK;
-		page &= PAGE_MASK;
-
-		local_irq_save(flags);
-		if (vma->vm_mm != current->mm) {
-			saved_asid = get_asid();
-			set_asid(asid);
-		}
-		__flush_tlb_page(asid, page);
-		if (saved_asid != MMU_NO_ASID)
-			set_asid(saved_asid);
-		local_irq_restore(flags);
-	}
-}
-
-void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
-		     unsigned long end)
-{
-	struct mm_struct *mm = vma->vm_mm;
-
-	if (mm->context != NO_CONTEXT) {
-		unsigned long flags;
-		int size;
-
-		local_irq_save(flags);
-		size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
-		if (size > (MMU_NTLB_ENTRIES/4)) { /* Too many TLB to flush */
-			mm->context = NO_CONTEXT;
-			if (mm == current->mm)
-				activate_context(mm);
-		} else {
-			unsigned long asid = mm->context&MMU_CONTEXT_ASID_MASK;
-			unsigned long saved_asid = MMU_NO_ASID;
-
-			start &= PAGE_MASK;
-			end += (PAGE_SIZE - 1);
-			end &= PAGE_MASK;
-			if (mm != current->mm) {
-				saved_asid = get_asid();
-				set_asid(asid);
-			}
-			while (start < end) {
-				__flush_tlb_page(asid, start);
-				start += PAGE_SIZE;
-			}
-			if (saved_asid != MMU_NO_ASID)
-				set_asid(saved_asid);
-		}
-		local_irq_restore(flags);
-	}
-}
-
-void flush_tlb_kernel_range(unsigned long start, unsigned long end)
-{
-	unsigned long flags;
-	int size;
-
-	local_irq_save(flags);
-	size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
-	if (size > (MMU_NTLB_ENTRIES/4)) { /* Too many TLB to flush */
-		flush_tlb_all();
-	} else {
-		unsigned long asid = init_mm.context&MMU_CONTEXT_ASID_MASK;
-		unsigned long saved_asid = get_asid();
-
-		start &= PAGE_MASK;
-		end += (PAGE_SIZE - 1);
-		end &= PAGE_MASK;
-		set_asid(asid);
-		while (start < end) {
-			__flush_tlb_page(asid, start);
-			start += PAGE_SIZE;
-		}
-		set_asid(saved_asid);
-	}
-	local_irq_restore(flags);
-}
-
-void flush_tlb_mm(struct mm_struct *mm)
-{
-	/* Invalidate all TLB of this process. */
-	/* Instead of invalidating each TLB, we get new MMU context. */
-	if (mm->context != NO_CONTEXT) {
-		unsigned long flags;
-
-		local_irq_save(flags);
-		mm->context = NO_CONTEXT;
-		if (mm == current->mm)
-			activate_context(mm);
-		local_irq_restore(flags);
-	}
-}
-
-void flush_tlb_all(void)
-{
-	unsigned long flags, status;
-
-	/*
-	 * Flush all the TLB.
-	 *
-	 * Write to the MMU control register's bit:
-	 * TF-bit for SH-3, TI-bit for SH-4.
-	 * It's same position, bit #2.
-	 */
-	local_irq_save(flags);
-	status = ctrl_inl(MMUCR);
-	status |= 0x04;
-	ctrl_outl(status, MMUCR);
-	ctrl_barrier();
-	local_irq_restore(flags);
-}
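A side note on the unlikely() annotations introduced in the new fault path: unlikely() is the kernel's branch-prediction hint from include/linux/compiler.h, a thin wrapper around GCC's __builtin_expect(), which lets the compiler lay the rare error returns out of the hot path. A minimal, self-contained illustration follows; handle_fault() and its arguments are made up for the example and are not part of this patch:

	#include <stdio.h>

	/* Kernel-style branch hints, as defined in include/linux/compiler.h. */
	#define likely(x)	__builtin_expect(!!(x), 1)
	#define unlikely(x)	__builtin_expect(!!(x), 0)

	/* Hypothetical stand-in for the range check in __do_page_fault(). */
	static int handle_fault(unsigned long address, unsigned long task_size)
	{
		if (unlikely(address >= task_size))
			return 1;	/* rare error path, moved off the hot path */

		return 0;		/* common case falls straight through */
	}

	int main(void)
	{
		printf("%d\n", handle_fault(0x1000UL, 0x7c000000UL));
		return 0;
	}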