author     Pekka Paalanen <pq@iki.fi>                2008-05-12 15:20:56 -0400
committer  Thomas Gleixner <tglx@linutronix.de>      2008-05-23 15:16:38 -0400
commit     86069782d62e731b4835a0cf8eb7d1d0e17cf306
tree       e6268f782a96d7ce808f354b83be69d4b349659d /arch/x86/mm/fault.c
parent     8f0f996e80b980fba07d11961d96a5fefb60976a
x86: add a list for custom page fault handlers.
Provide kernel modules with a way to register custom page fault handlers.
On every page fault each registered handler is called in turn. A handler
may handle the fault and force do_page_fault() to return immediately.
This functionality is similar to the now removed page fault notifiers.
Custom page fault handlers are used by debugging and reverse engineering
tools. Mmiotrace is one such tool, and a patch to add it to the tree
will follow.
The custom page fault handlers are called earlier in do_page_fault()
than the page fault notifiers were.
Signed-off-by: Pekka Paalanen <pq@iki.fi>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
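For illustration only, here is a minimal sketch of how a module might use the new interface. It is not part of the patch: it assumes struct pf_handler carries a struct hlist_node hlist and an int (*handler)(struct pt_regs *, unsigned long, unsigned long) callback (consistent with how handle_custom_pf() uses it in the diff below), that the register/unregister prototypes come from a header not shown in this fault.c diff, and that CONFIG_PAGE_FAULT_HANDLERS is enabled. Names such as demo_pf_handler are made up.

```c
/*
 * Hypothetical example module (not part of this commit).  Assumes
 * struct pf_handler provides at least:
 *
 *     struct hlist_node hlist;
 *     int (*handler)(struct pt_regs *, unsigned long, unsigned long);
 *
 * and that register/unregister_page_fault_handler() are declared in a
 * header that is not part of the fault.c diff shown here.
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/ptrace.h>
#include <linux/rcupdate.h>

static int demo_pf_handler(struct pt_regs *regs, unsigned long error_code,
			   unsigned long address)
{
	printk(KERN_INFO "demo_pf: fault at 0x%lx, error_code 0x%lx\n",
	       address, error_code);
	/* 0 = not handled, let normal fault processing continue;
	 * non-zero would make do_page_fault() return immediately. */
	return 0;
}

static struct pf_handler demo_pfh = {
	.handler = demo_pf_handler,
};

static int __init demo_pf_init(void)
{
	register_page_fault_handler(&demo_pfh);
	return 0;
}

static void __exit demo_pf_exit(void)
{
	unregister_page_fault_handler(&demo_pfh);
	/* Make sure no CPU is still executing demo_pf_handler() via the
	 * RCU-protected list before the module text goes away. */
	synchronize_rcu();
}

module_init(demo_pf_init);
module_exit(demo_pf_exit);
MODULE_LICENSE("GPL");
```

Returning non-zero from the callback causes handle_custom_pf() to stop walking the list and do_page_fault() to return immediately, as described in the commit message above.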
Diffstat (limited to 'arch/x86/mm/fault.c')
-rw-r--r--  arch/x86/mm/fault.c | 56
1 file changed, 56 insertions(+), 0 deletions(-)
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index fd7e1798c75a..343f5c1aacc8 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -49,6 +49,60 @@
 #define PF_RSVD	(1<<3)
 #define PF_INSTR	(1<<4)
 
+#ifdef CONFIG_PAGE_FAULT_HANDLERS
+static HLIST_HEAD(pf_handlers); /* protected by RCU */
+static DEFINE_SPINLOCK(pf_handlers_writer);
+
+void register_page_fault_handler(struct pf_handler *new_pfh)
+{
+	unsigned long flags;
+	spin_lock_irqsave(&pf_handlers_writer, flags);
+	hlist_add_head_rcu(&new_pfh->hlist, &pf_handlers);
+	spin_unlock_irqrestore(&pf_handlers_writer, flags);
+}
+EXPORT_SYMBOL_GPL(register_page_fault_handler);
+
+/**
+ * unregister_page_fault_handler:
+ * The caller must ensure @old_pfh is not in use anymore before freeing it.
+ * This function does not guarantee it. The list of handlers is protected by
+ * RCU, so you can do this by e.g. calling synchronize_rcu().
+ */
+void unregister_page_fault_handler(struct pf_handler *old_pfh)
+{
+	unsigned long flags;
+	spin_lock_irqsave(&pf_handlers_writer, flags);
+	hlist_del_rcu(&old_pfh->hlist);
+	spin_unlock_irqrestore(&pf_handlers_writer, flags);
+}
+EXPORT_SYMBOL_GPL(unregister_page_fault_handler);
+#endif
+
+/* returns non-zero if do_page_fault() should return */
+static int handle_custom_pf(struct pt_regs *regs, unsigned long error_code,
+				unsigned long address)
+{
+#ifdef CONFIG_PAGE_FAULT_HANDLERS
+	int ret = 0;
+	struct pf_handler *cur;
+	struct hlist_node *ncur;
+
+	if (hlist_empty(&pf_handlers))
+		return 0;
+
+	rcu_read_lock();
+	hlist_for_each_entry_rcu(cur, ncur, &pf_handlers, hlist) {
+		ret = cur->handler(regs, error_code, address);
+		if (ret)
+			break;
+	}
+	rcu_read_unlock();
+	return ret;
+#else
+	return 0;
+#endif
+}
+
 static inline int notify_page_fault(struct pt_regs *regs)
 {
 #ifdef CONFIG_KPROBES
@@ -601,6 +655,8 @@ void __kprobes do_page_fault(struct pt_regs *regs, unsigned long error_code)
 
 	if (notify_page_fault(regs))
 		return;
+	if (handle_custom_pf(regs, error_code, address))
+		return;
 
 	/*
 	 * We fault-in kernel-space virtual memory on-demand. The
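A usage note on the kernel-doc above unregister_page_fault_handler(): because readers walk pf_handlers under rcu_read_lock(), a handler object may only be freed after a grace period. A hedged sketch of that tear-down order for a dynamically allocated handler (the helper name and kzalloc-based allocation are hypothetical):

```c
#include <linux/rcupdate.h>
#include <linux/slab.h>

/*
 * Hypothetical helper showing the tear-down order the kernel-doc asks
 * for: unlink the handler, wait for any handle_custom_pf() callers
 * still inside their RCU read-side critical section, then free.
 */
static void remove_handler_and_free(struct pf_handler *pfh)
{
	unregister_page_fault_handler(pfh);
	synchronize_rcu();	/* no reader can still see pfh after this */
	kfree(pfh);
}
```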