diff options
author | Christoph Hellwig <hch@lst.de> | 2007-10-16 04:24:07 -0400 |
---|---|---|
committer | Linus Torvalds <torvalds@woody.linux-foundation.org> | 2007-10-16 12:42:50 -0400 |
commit | 74a0b5762713a26496db72eac34fbbed46f20fce (patch) | |
tree | 4a14df7c07ebc16283454f33713519a0e10b5c43 /arch | |
parent | d5a7430ddcdb598261d70f7eb1bf450b5be52085 (diff) |
x86: optimize page faults like all other architectures and kill notifier cruft
x86(-64) are the last architectures still using the page fault notifier
cruft for the kprobes page fault hook. This patch converts them to the
proper direct calls, and removes the now unused pagefault notifier bits
as well as the cruft in kprobes.c that was related to this mess.
I know Andi didn't really like this, but all other architecture maintainers
agreed the direct calls are much better and besides the obvious cruft
removal a common way of dealing with kprobes across architectures is
important as well.
[akpm@linux-foundation.org: build fix]
[akpm@linux-foundation.org: fix sparc64]
Signed-off-by: Christoph Hellwig <hch@lst.de>
Cc: Andi Kleen <ak@suse.de>
Cc: <linux-arch@vger.kernel.org>
Cc: Prasanna S Panchamukhi <prasanna@in.ibm.com>
Cc: Ananth N Mavinakayanahalli <ananth@in.ibm.com>
Cc: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'arch')
-rw-r--r-- | arch/x86/kernel/kprobes_32.c | 3 | ||||
-rw-r--r-- | arch/x86/kernel/kprobes_64.c | 1 | ||||
-rw-r--r-- | arch/x86/mm/fault_32.c | 43 | ||||
-rw-r--r-- | arch/x86/mm/fault_64.c | 44 |
4 files changed, 39 insertions, 52 deletions
diff --git a/arch/x86/kernel/kprobes_32.c b/arch/x86/kernel/kprobes_32.c index e7d0d3c2ef64..06b86e5617f6 100644 --- a/arch/x86/kernel/kprobes_32.c +++ b/arch/x86/kernel/kprobes_32.c | |||
@@ -584,7 +584,7 @@ out: | |||
584 | return 1; | 584 | return 1; |
585 | } | 585 | } |
586 | 586 | ||
587 | static int __kprobes kprobe_fault_handler(struct pt_regs *regs, int trapnr) | 587 | int __kprobes kprobe_fault_handler(struct pt_regs *regs, int trapnr) |
588 | { | 588 | { |
589 | struct kprobe *cur = kprobe_running(); | 589 | struct kprobe *cur = kprobe_running(); |
590 | struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); | 590 | struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); |
@@ -666,7 +666,6 @@ int __kprobes kprobe_exceptions_notify(struct notifier_block *self, | |||
666 | ret = NOTIFY_STOP; | 666 | ret = NOTIFY_STOP; |
667 | break; | 667 | break; |
668 | case DIE_GPF: | 668 | case DIE_GPF: |
669 | case DIE_PAGE_FAULT: | ||
670 | /* kprobe_running() needs smp_processor_id() */ | 669 | /* kprobe_running() needs smp_processor_id() */ |
671 | preempt_disable(); | 670 | preempt_disable(); |
672 | if (kprobe_running() && | 671 | if (kprobe_running() && |
diff --git a/arch/x86/kernel/kprobes_64.c b/arch/x86/kernel/kprobes_64.c index 62e28e52d784..7c16506d681f 100644 --- a/arch/x86/kernel/kprobes_64.c +++ b/arch/x86/kernel/kprobes_64.c | |||
@@ -657,7 +657,6 @@ int __kprobes kprobe_exceptions_notify(struct notifier_block *self, | |||
657 | ret = NOTIFY_STOP; | 657 | ret = NOTIFY_STOP; |
658 | break; | 658 | break; |
659 | case DIE_GPF: | 659 | case DIE_GPF: |
660 | case DIE_PAGE_FAULT: | ||
661 | /* kprobe_running() needs smp_processor_id() */ | 660 | /* kprobe_running() needs smp_processor_id() */ |
662 | preempt_disable(); | 661 | preempt_disable(); |
663 | if (kprobe_running() && | 662 | if (kprobe_running() && |
diff --git a/arch/x86/mm/fault_32.c b/arch/x86/mm/fault_32.c index fcb38e7f3543..be72c2a5b03b 100644 --- a/arch/x86/mm/fault_32.c +++ b/arch/x86/mm/fault_32.c | |||
@@ -25,6 +25,7 @@ | |||
25 | #include <linux/kprobes.h> | 25 | #include <linux/kprobes.h> |
26 | #include <linux/uaccess.h> | 26 | #include <linux/uaccess.h> |
27 | #include <linux/kdebug.h> | 27 | #include <linux/kdebug.h> |
28 | #include <linux/kprobes.h> | ||
28 | 29 | ||
29 | #include <asm/system.h> | 30 | #include <asm/system.h> |
30 | #include <asm/desc.h> | 31 | #include <asm/desc.h> |
@@ -32,33 +33,27 @@ | |||
32 | 33 | ||
33 | extern void die(const char *,struct pt_regs *,long); | 34 | extern void die(const char *,struct pt_regs *,long); |
34 | 35 | ||
35 | static ATOMIC_NOTIFIER_HEAD(notify_page_fault_chain); | 36 | #ifdef CONFIG_KPROBES |
36 | 37 | static inline int notify_page_fault(struct pt_regs *regs) | |
37 | int register_page_fault_notifier(struct notifier_block *nb) | ||
38 | { | 38 | { |
39 | vmalloc_sync_all(); | 39 | int ret = 0; |
40 | return atomic_notifier_chain_register(¬ify_page_fault_chain, nb); | 40 | |
41 | } | 41 | /* kprobe_running() needs smp_processor_id() */ |
42 | EXPORT_SYMBOL_GPL(register_page_fault_notifier); | 42 | if (!user_mode_vm(regs)) { |
43 | preempt_disable(); | ||
44 | if (kprobe_running() && kprobe_fault_handler(regs, 14)) | ||
45 | ret = 1; | ||
46 | preempt_enable(); | ||
47 | } | ||
43 | 48 | ||
44 | int unregister_page_fault_notifier(struct notifier_block *nb) | 49 | return ret; |
45 | { | ||
46 | return atomic_notifier_chain_unregister(¬ify_page_fault_chain, nb); | ||
47 | } | 50 | } |
48 | EXPORT_SYMBOL_GPL(unregister_page_fault_notifier); | 51 | #else |
49 | 52 | static inline int notify_page_fault(struct pt_regs *regs) | |
50 | static inline int notify_page_fault(struct pt_regs *regs, long err) | ||
51 | { | 53 | { |
52 | struct die_args args = { | 54 | return 0; |
53 | .regs = regs, | ||
54 | .str = "page fault", | ||
55 | .err = err, | ||
56 | .trapnr = 14, | ||
57 | .signr = SIGSEGV | ||
58 | }; | ||
59 | return atomic_notifier_call_chain(¬ify_page_fault_chain, | ||
60 | DIE_PAGE_FAULT, &args); | ||
61 | } | 55 | } |
56 | #endif | ||
62 | 57 | ||
63 | /* | 58 | /* |
64 | * Return EIP plus the CS segment base. The segment limit is also | 59 | * Return EIP plus the CS segment base. The segment limit is also |
@@ -331,7 +326,7 @@ fastcall void __kprobes do_page_fault(struct pt_regs *regs, | |||
331 | if (unlikely(address >= TASK_SIZE)) { | 326 | if (unlikely(address >= TASK_SIZE)) { |
332 | if (!(error_code & 0x0000000d) && vmalloc_fault(address) >= 0) | 327 | if (!(error_code & 0x0000000d) && vmalloc_fault(address) >= 0) |
333 | return; | 328 | return; |
334 | if (notify_page_fault(regs, error_code) == NOTIFY_STOP) | 329 | if (notify_page_fault(regs)) |
335 | return; | 330 | return; |
336 | /* | 331 | /* |
337 | * Don't take the mm semaphore here. If we fixup a prefetch | 332 | * Don't take the mm semaphore here. If we fixup a prefetch |
@@ -340,7 +335,7 @@ fastcall void __kprobes do_page_fault(struct pt_regs *regs, | |||
340 | goto bad_area_nosemaphore; | 335 | goto bad_area_nosemaphore; |
341 | } | 336 | } |
342 | 337 | ||
343 | if (notify_page_fault(regs, error_code) == NOTIFY_STOP) | 338 | if (notify_page_fault(regs)) |
344 | return; | 339 | return; |
345 | 340 | ||
346 | /* It's safe to allow irq's after cr2 has been saved and the vmalloc | 341 | /* It's safe to allow irq's after cr2 has been saved and the vmalloc |
diff --git a/arch/x86/mm/fault_64.c b/arch/x86/mm/fault_64.c index 54816adb8e93..5e0e54906c48 100644 --- a/arch/x86/mm/fault_64.c +++ b/arch/x86/mm/fault_64.c | |||
@@ -25,6 +25,7 @@ | |||
25 | #include <linux/kprobes.h> | 25 | #include <linux/kprobes.h> |
26 | #include <linux/uaccess.h> | 26 | #include <linux/uaccess.h> |
27 | #include <linux/kdebug.h> | 27 | #include <linux/kdebug.h> |
28 | #include <linux/kprobes.h> | ||
28 | 29 | ||
29 | #include <asm/system.h> | 30 | #include <asm/system.h> |
30 | #include <asm/pgalloc.h> | 31 | #include <asm/pgalloc.h> |
@@ -40,34 +41,27 @@ | |||
40 | #define PF_RSVD (1<<3) | 41 | #define PF_RSVD (1<<3) |
41 | #define PF_INSTR (1<<4) | 42 | #define PF_INSTR (1<<4) |
42 | 43 | ||
43 | static ATOMIC_NOTIFIER_HEAD(notify_page_fault_chain); | 44 | #ifdef CONFIG_KPROBES |
44 | 45 | static inline int notify_page_fault(struct pt_regs *regs) | |
45 | /* Hook to register for page fault notifications */ | ||
46 | int register_page_fault_notifier(struct notifier_block *nb) | ||
47 | { | 46 | { |
48 | vmalloc_sync_all(); | 47 | int ret = 0; |
49 | return atomic_notifier_chain_register(¬ify_page_fault_chain, nb); | 48 | |
50 | } | 49 | /* kprobe_running() needs smp_processor_id() */ |
51 | EXPORT_SYMBOL_GPL(register_page_fault_notifier); | 50 | if (!user_mode(regs)) { |
51 | preempt_disable(); | ||
52 | if (kprobe_running() && kprobe_fault_handler(regs, 14)) | ||
53 | ret = 1; | ||
54 | preempt_enable(); | ||
55 | } | ||
52 | 56 | ||
53 | int unregister_page_fault_notifier(struct notifier_block *nb) | 57 | return ret; |
54 | { | ||
55 | return atomic_notifier_chain_unregister(¬ify_page_fault_chain, nb); | ||
56 | } | 58 | } |
57 | EXPORT_SYMBOL_GPL(unregister_page_fault_notifier); | 59 | #else |
58 | 60 | static inline int notify_page_fault(struct pt_regs *regs) | |
59 | static inline int notify_page_fault(struct pt_regs *regs, long err) | ||
60 | { | 61 | { |
61 | struct die_args args = { | 62 | return 0; |
62 | .regs = regs, | ||
63 | .str = "page fault", | ||
64 | .err = err, | ||
65 | .trapnr = 14, | ||
66 | .signr = SIGSEGV | ||
67 | }; | ||
68 | return atomic_notifier_call_chain(¬ify_page_fault_chain, | ||
69 | DIE_PAGE_FAULT, &args); | ||
70 | } | 63 | } |
64 | #endif | ||
71 | 65 | ||
72 | /* Sometimes the CPU reports invalid exceptions on prefetch. | 66 | /* Sometimes the CPU reports invalid exceptions on prefetch. |
73 | Check that here and ignore. | 67 | Check that here and ignore. |
@@ -345,7 +339,7 @@ asmlinkage void __kprobes do_page_fault(struct pt_regs *regs, | |||
345 | if (vmalloc_fault(address) >= 0) | 339 | if (vmalloc_fault(address) >= 0) |
346 | return; | 340 | return; |
347 | } | 341 | } |
348 | if (notify_page_fault(regs, error_code) == NOTIFY_STOP) | 342 | if (notify_page_fault(regs)) |
349 | return; | 343 | return; |
350 | /* | 344 | /* |
351 | * Don't take the mm semaphore here. If we fixup a prefetch | 345 | * Don't take the mm semaphore here. If we fixup a prefetch |
@@ -354,7 +348,7 @@ asmlinkage void __kprobes do_page_fault(struct pt_regs *regs, | |||
354 | goto bad_area_nosemaphore; | 348 | goto bad_area_nosemaphore; |
355 | } | 349 | } |
356 | 350 | ||
357 | if (notify_page_fault(regs, error_code) == NOTIFY_STOP) | 351 | if (notify_page_fault(regs)) |
358 | return; | 352 | return; |
359 | 353 | ||
360 | if (likely(regs->eflags & X86_EFLAGS_IF)) | 354 | if (likely(regs->eflags & X86_EFLAGS_IF)) |