Diffstat (limited to 'arch/sh/mm/fault.c')
-rw-r--r--  arch/sh/mm/fault.c  |  207
1 file changed, 45 insertions(+), 162 deletions(-)
diff --git a/arch/sh/mm/fault.c b/arch/sh/mm/fault.c
index 775f86cd3fe8..c69fd603226a 100644
--- a/arch/sh/mm/fault.c
+++ b/arch/sh/mm/fault.c
@@ -1,33 +1,22 @@
-/* $Id: fault.c,v 1.14 2004/01/13 05:52:11 kkojima Exp $
+/*
+ * Page fault handler for SH with an MMU.
  *
- * linux/arch/sh/mm/fault.c
  * Copyright (C) 1999 Niibe Yutaka
  * Copyright (C) 2003 Paul Mundt
  *
  * Based on linux/arch/i386/mm/fault.c:
  * Copyright (C) 1995 Linus Torvalds
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
  */
-
-#include <linux/signal.h>
-#include <linux/sched.h>
 #include <linux/kernel.h>
-#include <linux/errno.h>
-#include <linux/string.h>
-#include <linux/types.h>
-#include <linux/ptrace.h>
-#include <linux/mman.h>
 #include <linux/mm.h>
-#include <linux/smp.h>
-#include <linux/smp_lock.h>
-#include <linux/interrupt.h>
-#include <linux/module.h>
-
+#include <linux/hardirq.h>
+#include <linux/kprobes.h>
 #include <asm/system.h>
-#include <asm/io.h>
-#include <asm/uaccess.h>
-#include <asm/pgalloc.h>
 #include <asm/mmu_context.h>
-#include <asm/cacheflush.h>
 #include <asm/kgdb.h>
 
 extern void die(const char *,struct pt_regs *,long);
@@ -187,18 +176,30 @@ do_sigbus:
         goto no_context;
 }
 
+#ifdef CONFIG_SH_STORE_QUEUES
 /*
- * Called with interrupt disabled.
+ * This is a special case for the SH-4 store queues, as pages for this
+ * space still need to be faulted in before it's possible to flush the
+ * store queue cache for writeout to the remapped region.
  */
-asmlinkage int __do_page_fault(struct pt_regs *regs, unsigned long writeaccess,
-                               unsigned long address)
+#define P3_ADDR_MAX     (P4SEG_STORE_QUE + 0x04000000)
+#else
+#define P3_ADDR_MAX     P4SEG
+#endif
+
+/*
+ * Called with interrupts disabled.
+ */
+asmlinkage int __kprobes __do_page_fault(struct pt_regs *regs,
+                                         unsigned long writeaccess,
+                                         unsigned long address)
 {
-        unsigned long addrmax = P4SEG;
         pgd_t *pgd;
+        pud_t *pud;
         pmd_t *pmd;
         pte_t *pte;
         pte_t entry;
-        struct mm_struct *mm;
+        struct mm_struct *mm = current->mm;
         spinlock_t *ptl;
         int ret = 1;
 
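The P3_ADDR_MAX definition above folds the old runtime addrmax variable into a compile-time constant and documents why the store queue window is special: SH-4 store queue pages live in P4 control space but still need page-table backing before the queue contents can be written out to the remapped region. A minimal sketch of the resulting address classification follows; the numeric segment bases are an assumption based on the conventional 29-bit SH layout, not something this patch states.

#include <stdio.h>

/* Assumed 29-bit SH segment bases; the patch only names the symbols. */
#define P3SEG           0xc0000000UL    /* pageable kernel segment */
#define P4SEG           0xe0000000UL    /* control space           */
#define P4SEG_STORE_QUE 0xe4000000UL    /* SH-4 store queue window */

/* Mirrors the hunk above: with store queues enabled, kernel faults
 * are honoured up to the end of the 64MB store queue window. */
static int kernel_fault_range(unsigned long address, int store_queues)
{
        unsigned long max = store_queues ? P4SEG_STORE_QUE + 0x04000000
                                         : P4SEG;

        return address >= P3SEG && address < max;
}

int main(void)
{
        printf("%d\n", kernel_fault_range(0xe4000000UL, 1)); /* 1: SQ page  */
        printf("%d\n", kernel_fault_range(0xe4000000UL, 0)); /* 0: P4 ctrl  */
        return 0;
}

With store queues enabled, the faultable window extends 64MB past P4SEG_STORE_QUE; without them it stops at the P3/P4 boundary.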
@@ -207,31 +208,37 @@ asmlinkage int __do_page_fault(struct pt_regs *regs, unsigned long writeaccess,
         kgdb_bus_err_hook();
 #endif
 
-#ifdef CONFIG_SH_STORE_QUEUES
-        addrmax = P4SEG_STORE_QUE + 0x04000000;
-#endif
-
-        if (address >= P3SEG && address < addrmax) {
+        /*
+         * We don't take page faults for P1, P2, and parts of P4, these
+         * are always mapped, whether it be due to legacy behaviour in
+         * 29-bit mode, or due to PMB configuration in 32-bit mode.
+         */
+        if (address >= P3SEG && address < P3_ADDR_MAX) {
                 pgd = pgd_offset_k(address);
                 mm = NULL;
-        } else if (address >= TASK_SIZE)
-                return 1;
-        else if (!(mm = current->mm))
-                return 1;
-        else
+        } else {
+                if (unlikely(address >= TASK_SIZE || !mm))
+                        return 1;
+
                 pgd = pgd_offset(mm, address);
+        }
 
-        pmd = pmd_offset(pgd, address);
+        pud = pud_offset(pgd, address);
+        if (pud_none_or_clear_bad(pud))
+                return 1;
+        pmd = pmd_offset(pud, address);
         if (pmd_none_or_clear_bad(pmd))
                 return 1;
+
         if (mm)
                 pte = pte_offset_map_lock(mm, pmd, address, &ptl);
         else
                 pte = pte_offset_kernel(pmd, address);
 
         entry = *pte;
-        if (pte_none(entry) || pte_not_present(entry)
-                        || (writeaccess && !pte_write(entry)))
+        if (unlikely(pte_none(entry) || pte_not_present(entry)))
+                goto unlock;
+        if (unlikely(writeaccess && !pte_write(entry)))
                 goto unlock;
 
         if (writeaccess)
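Two things change in the walk above: a pud level appears between pgd and pmd (the generic four-level page table interface; on two-level SH configurations the pud and pmd levels are folded, so the extra calls are effectively pass-throughs), and the failure paths gain unlikely() hints, which expand to __builtin_expect() so the compiler can move them off the hot path. A hypothetical standalone helper showing the same descent, using the generic pgtable calls of this kernel generation:

/* Hypothetical helper mirroring the walk in the hunk above. On a
 * two-level SH build the pud/pmd levels are folded, so the middle
 * two steps are effectively identity operations. */
static pte_t *walk_to_pte(struct mm_struct *mm, unsigned long address)
{
        pgd_t *pgd = mm ? pgd_offset(mm, address) : pgd_offset_k(address);
        pud_t *pud;
        pmd_t *pmd;

        pud = pud_offset(pgd, address);         /* folded: passes pgd through */
        if (pud_none_or_clear_bad(pud))
                return NULL;

        pmd = pmd_offset(pud, address);         /* folded: passes pud through */
        if (pmd_none_or_clear_bad(pmd))
                return NULL;

        return pte_offset_kernel(pmd, address); /* the leaf entry */
}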
@@ -243,13 +250,7 @@ asmlinkage int __do_page_fault(struct pt_regs *regs, unsigned long writeaccess,
          * ITLB is not affected by "ldtlb" instruction.
          * So, we need to flush the entry by ourselves.
          */
-
-        {
-                unsigned long flags;
-                local_irq_save(flags);
-                __flush_tlb_page(get_asid(), address&PAGE_MASK);
-                local_irq_restore(flags);
-        }
+        __flush_tlb_page(get_asid(), address & PAGE_MASK);
 #endif
 
         set_pte(pte, entry);
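The dropped local_irq_save()/local_irq_restore() pair was pure overhead: as the comment above __do_page_fault() says, the handler is entered with interrupts already disabled. A hypothetical helper that asserts the precondition instead of re-establishing it (WARN_ON() and irqs_disabled() are standard kernel facilities, not part of this patch):

/* Hypothetical: assert rather than re-establish the IRQ precondition. */
static inline void flush_one_utlb_entry(unsigned long address)
{
        WARN_ON(!irqs_disabled());      /* __do_page_fault() guarantees this */
        __flush_tlb_page(get_asid(), address & PAGE_MASK);
}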
@@ -260,121 +261,3 @@ unlock:
         pte_unmap_unlock(pte, ptl);
         return ret;
 }
-
-void flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
-{
-        if (vma->vm_mm && vma->vm_mm->context != NO_CONTEXT) {
-                unsigned long flags;
-                unsigned long asid;
-                unsigned long saved_asid = MMU_NO_ASID;
-
-                asid = vma->vm_mm->context & MMU_CONTEXT_ASID_MASK;
-                page &= PAGE_MASK;
-
-                local_irq_save(flags);
-                if (vma->vm_mm != current->mm) {
-                        saved_asid = get_asid();
-                        set_asid(asid);
-                }
-                __flush_tlb_page(asid, page);
-                if (saved_asid != MMU_NO_ASID)
-                        set_asid(saved_asid);
-                local_irq_restore(flags);
-        }
-}
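From here on, the hunk only deletes: the TLB maintenance routines leave fault.c entirely (this diffstat view is limited to fault.c, so their presumable new home in a dedicated TLB-flush file is not shown). flush_tlb_page() above illustrates the recurring borrow-the-ASID pattern: TLB entries are tagged with an ASID, and the flush takes effect against the ASID installed in the MMU, so flushing on behalf of another mm means temporarily installing that mm's ASID. The pattern as a hypothetical helper (interrupts are assumed off, as in the caller above):

/* Hypothetical helper capturing the pattern used above: install the
 * target ASID, flush one page, then restore the previous ASID.
 * Caller must have interrupts disabled. */
static void flush_page_under_asid(unsigned long asid, unsigned long page)
{
        unsigned long saved_asid = MMU_NO_ASID;

        if (asid != get_asid()) {
                saved_asid = get_asid();
                set_asid(asid);
        }
        __flush_tlb_page(asid, page & PAGE_MASK);
        if (saved_asid != MMU_NO_ASID)
                set_asid(saved_asid);
}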
-
-void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
-                     unsigned long end)
-{
-        struct mm_struct *mm = vma->vm_mm;
-
-        if (mm->context != NO_CONTEXT) {
-                unsigned long flags;
-                int size;
-
-                local_irq_save(flags);
-                size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
-                if (size > (MMU_NTLB_ENTRIES/4)) { /* Too many TLB to flush */
-                        mm->context = NO_CONTEXT;
-                        if (mm == current->mm)
-                                activate_context(mm);
-                } else {
-                        unsigned long asid = mm->context&MMU_CONTEXT_ASID_MASK;
-                        unsigned long saved_asid = MMU_NO_ASID;
-
-                        start &= PAGE_MASK;
-                        end += (PAGE_SIZE - 1);
-                        end &= PAGE_MASK;
-                        if (mm != current->mm) {
-                                saved_asid = get_asid();
-                                set_asid(asid);
-                        }
-                        while (start < end) {
-                                __flush_tlb_page(asid, start);
-                                start += PAGE_SIZE;
-                        }
-                        if (saved_asid != MMU_NO_ASID)
-                                set_asid(saved_asid);
-                }
-                local_irq_restore(flags);
-        }
-}
-
-void flush_tlb_kernel_range(unsigned long start, unsigned long end)
-{
-        unsigned long flags;
-        int size;
-
-        local_irq_save(flags);
-        size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
-        if (size > (MMU_NTLB_ENTRIES/4)) { /* Too many TLB to flush */
-                flush_tlb_all();
-        } else {
-                unsigned long asid = init_mm.context&MMU_CONTEXT_ASID_MASK;
-                unsigned long saved_asid = get_asid();
-
-                start &= PAGE_MASK;
-                end += (PAGE_SIZE - 1);
-                end &= PAGE_MASK;
-                set_asid(asid);
-                while (start < end) {
-                        __flush_tlb_page(asid, start);
-                        start += PAGE_SIZE;
-                }
-                set_asid(saved_asid);
-        }
-        local_irq_restore(flags);
-}
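Both ranged flushes above share one heuristic: page-by-page invalidation only pays for small ranges, and anything spanning more than a quarter of the TLB's MMU_NTLB_ENTRIES slots falls back to wholesale invalidation (a context reset for user ranges, flush_tlb_all() for kernel ranges). The cutoff, factored into a hypothetical predicate:

/* Hypothetical predicate for the cutoff both loops above test. */
static int range_flush_too_costly(unsigned long start, unsigned long end)
{
        int pages = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;

        /* Beyond a quarter of the TLB, a wholesale flush is cheaper. */
        return pages > (MMU_NTLB_ENTRIES / 4);
}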
-
-void flush_tlb_mm(struct mm_struct *mm)
-{
-        /* Invalidate all TLB of this process. */
-        /* Instead of invalidating each TLB, we get new MMU context. */
-        if (mm->context != NO_CONTEXT) {
-                unsigned long flags;
-
-                local_irq_save(flags);
-                mm->context = NO_CONTEXT;
-                if (mm == current->mm)
-                        activate_context(mm);
-                local_irq_restore(flags);
-        }
-}
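flush_tlb_mm() above never touches the TLB hardware: entries are tagged with the owning mm's ASID, so resetting the context makes activate_context() hand out a fresh ASID, after which the stale entries simply never match again. A toy model of the matching rule, for illustration only:

/* Toy model (not kernel code): a TLB entry hits only if both the
 * virtual page number and the ASID match, so re-tagging the mm with
 * a fresh ASID orphans its old entries without walking the TLB. */
struct toy_tlb_entry {
        unsigned long vpn;
        unsigned long asid;
};

static int toy_tlb_hits(const struct toy_tlb_entry *e,
                        unsigned long vpn, unsigned long asid)
{
        return e->vpn == vpn && e->asid == asid;
}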
-
-void flush_tlb_all(void)
-{
-        unsigned long flags, status;
-
-        /*
-         * Flush all the TLB.
-         *
-         * Write to the MMU control register's bit:
-         * TF-bit for SH-3, TI-bit for SH-4.
-         * It's same position, bit #2.
-         */
-        local_irq_save(flags);
-        status = ctrl_inl(MMUCR);
-        status |= 0x04;
-        ctrl_outl(status, MMUCR);
-        local_irq_restore(flags);
-}
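The 0x04 written to MMUCR in flush_tlb_all() is the flush bit the comment names: TF on SH-3, TI on SH-4, bit 2 either way. The same body with the constant named (the macro is an assumption for readability; MMUCR, ctrl_inl() and ctrl_outl() are as used above):

#define MMUCR_FLUSH_BIT (1 << 2)        /* TF (SH-3) / TI (SH-4) */

/* Sketch equivalent to the body of flush_tlb_all() above. */
static void flush_tlb_all_sketch(void)
{
        unsigned long flags, status;

        local_irq_save(flags);
        status = ctrl_inl(MMUCR);
        status |= MMUCR_FLUSH_BIT;      /* request full TLB invalidation */
        ctrl_outl(status, MMUCR);
        local_irq_restore(flags);
}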