Diffstat (limited to 'arch/x86')
-rw-r--r--	arch/x86/kernel/kprobes_32.c	 3
-rw-r--r--	arch/x86/kernel/kprobes_64.c	 1
-rw-r--r--	arch/x86/mm/fault_32.c	43
-rw-r--r--	arch/x86/mm/fault_64.c	44
4 files changed, 39 insertions, 52 deletions
diff --git a/arch/x86/kernel/kprobes_32.c b/arch/x86/kernel/kprobes_32.c
index e7d0d3c2ef64..06b86e5617f6 100644
--- a/arch/x86/kernel/kprobes_32.c
+++ b/arch/x86/kernel/kprobes_32.c
@@ -584,7 +584,7 @@ out:
 	return 1;
 }
 
-static int __kprobes kprobe_fault_handler(struct pt_regs *regs, int trapnr)
+int __kprobes kprobe_fault_handler(struct pt_regs *regs, int trapnr)
 {
 	struct kprobe *cur = kprobe_running();
 	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
@@ -666,7 +666,6 @@ int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
 		ret = NOTIFY_STOP;
 		break;
 	case DIE_GPF:
-	case DIE_PAGE_FAULT:
 		/* kprobe_running() needs smp_processor_id() */
 		preempt_disable();
 		if (kprobe_running() &&
diff --git a/arch/x86/kernel/kprobes_64.c b/arch/x86/kernel/kprobes_64.c
index 62e28e52d784..7c16506d681f 100644
--- a/arch/x86/kernel/kprobes_64.c
+++ b/arch/x86/kernel/kprobes_64.c
@@ -657,7 +657,6 @@ int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
 		ret = NOTIFY_STOP;
 		break;
 	case DIE_GPF:
-	case DIE_PAGE_FAULT:
 		/* kprobe_running() needs smp_processor_id() */
 		preempt_disable();
 		if (kprobe_running() &&
diff --git a/arch/x86/mm/fault_32.c b/arch/x86/mm/fault_32.c
index fcb38e7f3543..be72c2a5b03b 100644
--- a/arch/x86/mm/fault_32.c
+++ b/arch/x86/mm/fault_32.c
@@ -25,6 +25,7 @@
 #include <linux/kprobes.h>
 #include <linux/uaccess.h>
 #include <linux/kdebug.h>
+#include <linux/kprobes.h>
 
 #include <asm/system.h>
 #include <asm/desc.h>
@@ -32,33 +33,27 @@
 
 extern void die(const char *,struct pt_regs *,long);
 
-static ATOMIC_NOTIFIER_HEAD(notify_page_fault_chain);
-
-int register_page_fault_notifier(struct notifier_block *nb)
+#ifdef CONFIG_KPROBES
+static inline int notify_page_fault(struct pt_regs *regs)
 {
-	vmalloc_sync_all();
-	return atomic_notifier_chain_register(&notify_page_fault_chain, nb);
-}
-EXPORT_SYMBOL_GPL(register_page_fault_notifier);
+	int ret = 0;
+
+	/* kprobe_running() needs smp_processor_id() */
+	if (!user_mode_vm(regs)) {
+		preempt_disable();
+		if (kprobe_running() && kprobe_fault_handler(regs, 14))
+			ret = 1;
+		preempt_enable();
+	}
 
-int unregister_page_fault_notifier(struct notifier_block *nb)
-{
-	return atomic_notifier_chain_unregister(&notify_page_fault_chain, nb);
+	return ret;
 }
-EXPORT_SYMBOL_GPL(unregister_page_fault_notifier);
-
-static inline int notify_page_fault(struct pt_regs *regs, long err)
+#else
+static inline int notify_page_fault(struct pt_regs *regs)
 {
-	struct die_args args = {
-		.regs = regs,
-		.str = "page fault",
-		.err = err,
-		.trapnr = 14,
-		.signr = SIGSEGV
-	};
-	return atomic_notifier_call_chain(&notify_page_fault_chain,
-			DIE_PAGE_FAULT, &args);
+	return 0;
 }
+#endif
 
 /*
  * Return EIP plus the CS segment base. The segment limit is also
@@ -331,7 +326,7 @@ fastcall void __kprobes do_page_fault(struct pt_regs *regs,
 	if (unlikely(address >= TASK_SIZE)) {
 		if (!(error_code & 0x0000000d) && vmalloc_fault(address) >= 0)
 			return;
-		if (notify_page_fault(regs, error_code) == NOTIFY_STOP)
+		if (notify_page_fault(regs))
 			return;
 		/*
 		 * Don't take the mm semaphore here. If we fixup a prefetch
@@ -340,7 +335,7 @@ fastcall void __kprobes do_page_fault(struct pt_regs *regs,
 		goto bad_area_nosemaphore;
 	}
 
-	if (notify_page_fault(regs, error_code) == NOTIFY_STOP)
+	if (notify_page_fault(regs))
 		return;
 
 	/* It's safe to allow irq's after cr2 has been saved and the vmalloc
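
For reference: the hunks above delete the notify_page_fault_chain that modules could previously hook through register_page_fault_notifier() and the DIE_PAGE_FAULT event. A minimal sketch of such a pre-patch notifier user, for comparison only; the module and handler names (pf_notify, pf_nb) are illustrative and not part of this patch:

#include <linux/module.h>
#include <linux/notifier.h>
#include <linux/kdebug.h>

/* Illustrative pre-patch user of the now-removed page fault notifier chain. */
static int pf_notify(struct notifier_block *nb, unsigned long val, void *data)
{
	struct die_args *args = data;

	if (val == DIE_PAGE_FAULT)
		printk(KERN_DEBUG "page fault: error_code=%lx trap=%d\n",
		       args->err, args->trapnr);

	return NOTIFY_DONE;	/* let the normal fault handling continue */
}

static struct notifier_block pf_nb = {
	.notifier_call = pf_notify,
};

static int __init pf_init(void)
{
	/* register_page_fault_notifier() no longer exists after this patch */
	return register_page_fault_notifier(&pf_nb);
}

static void __exit pf_exit(void)
{
	unregister_page_fault_notifier(&pf_nb);
}

module_init(pf_init);
module_exit(pf_exit);
MODULE_LICENSE("GPL");
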
diff --git a/arch/x86/mm/fault_64.c b/arch/x86/mm/fault_64.c
index 54816adb8e93..5e0e54906c48 100644
--- a/arch/x86/mm/fault_64.c
+++ b/arch/x86/mm/fault_64.c
@@ -25,6 +25,7 @@
 #include <linux/kprobes.h>
 #include <linux/uaccess.h>
 #include <linux/kdebug.h>
+#include <linux/kprobes.h>
 
 #include <asm/system.h>
 #include <asm/pgalloc.h>
@@ -40,34 +41,27 @@
 #define PF_RSVD	(1<<3)
 #define PF_INSTR	(1<<4)
 
-static ATOMIC_NOTIFIER_HEAD(notify_page_fault_chain);
-
-/* Hook to register for page fault notifications */
-int register_page_fault_notifier(struct notifier_block *nb)
+#ifdef CONFIG_KPROBES
+static inline int notify_page_fault(struct pt_regs *regs)
 {
-	vmalloc_sync_all();
-	return atomic_notifier_chain_register(&notify_page_fault_chain, nb);
-}
-EXPORT_SYMBOL_GPL(register_page_fault_notifier);
+	int ret = 0;
+
+	/* kprobe_running() needs smp_processor_id() */
+	if (!user_mode(regs)) {
+		preempt_disable();
+		if (kprobe_running() && kprobe_fault_handler(regs, 14))
+			ret = 1;
+		preempt_enable();
+	}
 
-int unregister_page_fault_notifier(struct notifier_block *nb)
-{
-	return atomic_notifier_chain_unregister(&notify_page_fault_chain, nb);
+	return ret;
 }
-EXPORT_SYMBOL_GPL(unregister_page_fault_notifier);
-
-static inline int notify_page_fault(struct pt_regs *regs, long err)
+#else
+static inline int notify_page_fault(struct pt_regs *regs)
 {
-	struct die_args args = {
-		.regs = regs,
-		.str = "page fault",
-		.err = err,
-		.trapnr = 14,
-		.signr = SIGSEGV
-	};
-	return atomic_notifier_call_chain(&notify_page_fault_chain,
-			DIE_PAGE_FAULT, &args);
+	return 0;
 }
+#endif
 
 /* Sometimes the CPU reports invalid exceptions on prefetch.
    Check that here and ignore.
@@ -345,7 +339,7 @@ asmlinkage void __kprobes do_page_fault(struct pt_regs *regs,
 			if (vmalloc_fault(address) >= 0)
 				return;
 		}
-		if (notify_page_fault(regs, error_code) == NOTIFY_STOP)
+		if (notify_page_fault(regs))
 			return;
 		/*
 		 * Don't take the mm semaphore here. If we fixup a prefetch
@@ -354,7 +348,7 @@ asmlinkage void __kprobes do_page_fault(struct pt_regs *regs,
 		goto bad_area_nosemaphore;
 	}
 
-	if (notify_page_fault(regs, error_code) == NOTIFY_STOP)
+	if (notify_page_fault(regs))
 		return;
 
 	if (likely(regs->eflags & X86_EFLAGS_IF))
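
With the notifier chain gone, kprobes is the only remaining consumer of page fault notifications: do_page_fault() now calls the no-longer-static kprobe_fault_handler() directly, which in turn invokes the fault_handler of the currently running probe. A small sketch of a probe that uses this callback, in the style of the in-tree kprobes example code; the probed symbol ("do_fork") and handler names are illustrative, not part of the patch:

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/kprobes.h>

/* Illustrative probe target and handler names. */
static int handler_pre(struct kprobe *p, struct pt_regs *regs)
{
	return 0;	/* proceed with executing the probed instruction */
}

/*
 * Called from kprobe_fault_handler() when a fault (trap 14 for page faults)
 * hits while this probe's handlers or single-step are active.
 */
static int handler_fault(struct kprobe *p, struct pt_regs *regs, int trapnr)
{
	printk(KERN_DEBUG "kprobe fault_handler: trap #%d\n", trapnr);
	return 0;	/* not handled: fall back to the normal fixup/oops path */
}

static struct kprobe kp = {
	.symbol_name	= "do_fork",
	.pre_handler	= handler_pre,
	.fault_handler	= handler_fault,
};

static int __init probe_init(void)
{
	return register_kprobe(&kp);
}

static void __exit probe_exit(void)
{
	unregister_kprobe(&kp);
}

module_init(probe_init);
module_exit(probe_exit);
MODULE_LICENSE("GPL");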