Diffstat (limited to 'arch/sh/mm/fault_32.c')
 arch/sh/mm/fault_32.c | 57 +++++++++++++++++++++++++++++++++------------------------
 1 file changed, 33 insertions(+), 24 deletions(-)
diff --git a/arch/sh/mm/fault_32.c b/arch/sh/mm/fault_32.c
index 08a08ea5d69f..898d477e47c1 100644
--- a/arch/sh/mm/fault_32.c
+++ b/arch/sh/mm/fault_32.c
@@ -15,28 +15,13 @@
 #include <linux/mm.h>
 #include <linux/hardirq.h>
 #include <linux/kprobes.h>
+#include <linux/marker.h>
 #include <asm/io_trapped.h>
 #include <asm/system.h>
 #include <asm/mmu_context.h>
 #include <asm/tlbflush.h>
 #include <asm/kgdb.h>
 
-static inline int notify_page_fault(struct pt_regs *regs, int trap)
-{
-        int ret = 0;
-
-#ifdef CONFIG_KPROBES
-        if (!user_mode(regs)) {
-                preempt_disable();
-                if (kprobe_running() && kprobe_fault_handler(regs, trap))
-                        ret = 1;
-                preempt_enable();
-        }
-#endif
-
-        return ret;
-}
-
 /*
  * This routine handles page faults. It determines the address,
  * and the problem, and then passes it off to one of the appropriate
@@ -261,6 +246,25 @@ do_sigbus:
         goto no_context;
 }
 
+static inline int notify_page_fault(struct pt_regs *regs, int trap)
+{
+        int ret = 0;
+
+        trace_mark(kernel_arch_trap_entry, "trap_id %d ip #p%ld",
+                   trap >> 5, instruction_pointer(regs));
+
+#ifdef CONFIG_KPROBES
+        if (!user_mode(regs)) {
+                preempt_disable();
+                if (kprobe_running() && kprobe_fault_handler(regs, trap))
+                        ret = 1;
+                preempt_enable();
+        }
+#endif
+
+        return ret;
+}
+
 #ifdef CONFIG_SH_STORE_QUEUES
 /*
  * This is a special case for the SH-4 store queues, as pages for this
@@ -284,15 +288,18 @@ asmlinkage int __kprobes __do_page_fault(struct pt_regs *regs,
         pmd_t *pmd;
         pte_t *pte;
         pte_t entry;
+        int ret = 0;
 
         if (notify_page_fault(regs, lookup_exception_vector()))
-                return 0;
+                goto out;
 
 #ifdef CONFIG_SH_KGDB
         if (kgdb_nofault && kgdb_bus_err_hook)
                 kgdb_bus_err_hook();
 #endif
 
+        ret = 1;
+
         /*
          * We don't take page faults for P1, P2, and parts of P4, these
          * are always mapped, whether it be due to legacy behaviour in
@@ -302,24 +309,23 @@ asmlinkage int __kprobes __do_page_fault(struct pt_regs *regs,
                 pgd = pgd_offset_k(address);
         } else {
                 if (unlikely(address >= TASK_SIZE || !current->mm))
-                        return 1;
+                        goto out;
 
                 pgd = pgd_offset(current->mm, address);
         }
 
         pud = pud_offset(pgd, address);
         if (pud_none_or_clear_bad(pud))
-                return 1;
+                goto out;
         pmd = pmd_offset(pud, address);
         if (pmd_none_or_clear_bad(pmd))
-                return 1;
-
+                goto out;
         pte = pte_offset_kernel(pmd, address);
         entry = *pte;
         if (unlikely(pte_none(entry) || pte_not_present(entry)))
-                return 1;
+                goto out;
         if (unlikely(writeaccess && !pte_write(entry)))
-                return 1;
+                goto out;
 
         if (writeaccess)
                 entry = pte_mkdirty(entry);
@@ -336,5 +342,8 @@ asmlinkage int __kprobes __do_page_fault(struct pt_regs *regs,
         set_pte(pte, entry);
         update_mmu_cache(NULL, address, entry);
 
-        return 0;
+        ret = 0;
+out:
+        trace_mark(kernel_arch_trap_exit, MARK_NOARGS);
+        return ret;
 }
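
For context on the trace_mark() calls introduced above: markers (linux/marker.h) are compiled-in instrumentation points that remain inert until a probe is registered against them by name, which is how tracers such as LTTng pick up events like kernel_arch_trap_entry. The sketch below is not part of this patch; the module, callback name, and exact marker API signatures are assumptions based on the marker interface of this kernel era, and the interface changed between kernel versions, so treat it as an illustration only.

/*
 * Hypothetical probe module sketch: attach to the kernel_arch_trap_entry
 * marker added by the patch above.  Assumes marker_probe_register() takes
 * (name, format, probe, probe_private) and that the probe callback receives
 * its arguments as a va_list, as in the markers API of this kernel era.
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/marker.h>

/* Called each time trace_mark(kernel_arch_trap_entry, ...) fires. */
static void probe_trap_entry(void *probe_private, void *call_private,
                             const char *fmt, va_list *args)
{
        int trap_id = va_arg(*args, int);   /* matches "trap_id %d" */
        long ip = va_arg(*args, long);      /* matches "ip #p%ld"   */

        printk(KERN_DEBUG "kernel_arch_trap_entry: trap %d ip %ld\n",
               trap_id, ip);
}

static int __init probe_init(void)
{
        /* The format string must match the one used at the marker site. */
        return marker_probe_register("kernel_arch_trap_entry",
                                     "trap_id %d ip #p%ld",
                                     probe_trap_entry, NULL);
}

static void __exit probe_exit(void)
{
        marker_probe_unregister("kernel_arch_trap_entry",
                                probe_trap_entry, NULL);
}

module_init(probe_init);
module_exit(probe_exit);
MODULE_LICENSE("GPL");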