| author | Ingo Molnar <mingo@elte.hu> | 2008-07-10 05:43:00 -0400 |
|---|---|---|
| committer | Ingo Molnar <mingo@elte.hu> | 2008-07-10 05:43:00 -0400 |
| commit | bac0c9103b31c3dd83ad9d731dd9834e2ba75e4f (patch) | |
| tree | 702dd6a7ce06d224d594c2293af546b11ac9f51b /arch/x86/mm/fault.c | |
| parent | 6329d3021bcfa9038621e6e917d98929421d8ec8 (diff) | |
| parent | 98a05ed4bd7774f533ab185fe0bf2fdc58292d7c (diff) | |
Merge branch 'tracing/ftrace' into auto-ftrace-next
Diffstat (limited to 'arch/x86/mm/fault.c')
| -rw-r--r-- | arch/x86/mm/fault.c | 56 |
1 file changed, 56 insertions(+), 0 deletions(-)
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index 8bcb6f40ccb6..42394b353c6a 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -49,6 +49,60 @@
 #define PF_RSVD		(1<<3)
 #define PF_INSTR	(1<<4)
 
+#ifdef CONFIG_PAGE_FAULT_HANDLERS
+static HLIST_HEAD(pf_handlers); /* protected by RCU */
+static DEFINE_SPINLOCK(pf_handlers_writer);
+
+void register_page_fault_handler(struct pf_handler *new_pfh)
+{
+	unsigned long flags;
+	spin_lock_irqsave(&pf_handlers_writer, flags);
+	hlist_add_head_rcu(&new_pfh->hlist, &pf_handlers);
+	spin_unlock_irqrestore(&pf_handlers_writer, flags);
+}
+EXPORT_SYMBOL_GPL(register_page_fault_handler);
+
+/**
+ * unregister_page_fault_handler:
+ * The caller must ensure @old_pfh is not in use anymore before freeing it.
+ * This function does not guarantee it. The list of handlers is protected by
+ * RCU, so you can do this by e.g. calling synchronize_rcu().
+ */
+void unregister_page_fault_handler(struct pf_handler *old_pfh)
+{
+	unsigned long flags;
+	spin_lock_irqsave(&pf_handlers_writer, flags);
+	hlist_del_rcu(&old_pfh->hlist);
+	spin_unlock_irqrestore(&pf_handlers_writer, flags);
+}
+EXPORT_SYMBOL_GPL(unregister_page_fault_handler);
+#endif
+
+/* returns non-zero if do_page_fault() should return */
+static int handle_custom_pf(struct pt_regs *regs, unsigned long error_code,
+			unsigned long address)
+{
+#ifdef CONFIG_PAGE_FAULT_HANDLERS
+	int ret = 0;
+	struct pf_handler *cur;
+	struct hlist_node *ncur;
+
+	if (hlist_empty(&pf_handlers))
+		return 0;
+
+	rcu_read_lock();
+	hlist_for_each_entry_rcu(cur, ncur, &pf_handlers, hlist) {
+		ret = cur->handler(regs, error_code, address);
+		if (ret)
+			break;
+	}
+	rcu_read_unlock();
+	return ret;
+#else
+	return 0;
+#endif
+}
+
 static inline int notify_page_fault(struct pt_regs *regs)
 {
 #ifdef CONFIG_KPROBES
@@ -606,6 +660,8 @@ void __kprobes do_page_fault(struct pt_regs *regs, unsigned long error_code)
 
 	if (notify_page_fault(regs))
 		return;
+	if (handle_custom_pf(regs, error_code, address))
+		return;
 
 	/*
 	 * We fault-in kernel-space virtual memory on-demand. The
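The hunk above adds an RCU-protected list of page fault handlers: writers serialize on `pf_handlers_writer` while `handle_custom_pf()` walks the list under `rcu_read_lock()`, and the kerneldoc asks callers to wait for readers (e.g. via `synchronize_rcu()`) before freeing an unregistered handler. Below is a hypothetical usage sketch, not part of this commit: it assumes `struct pf_handler` exposes the `.hlist` and `.handler` members used by `handle_custom_pf()` and that the register/unregister prototypes are visible through an appropriate kernel header.

```c
/*
 * Hypothetical module hooking the x86 page fault path via the new API.
 * Assumes struct pf_handler has the .hlist/.handler members implied by
 * handle_custom_pf() in the diff above.
 */
#include <linux/module.h>
#include <linux/ptrace.h>
#include <linux/rcupdate.h>

static int my_fault_notifier(struct pt_regs *regs, unsigned long error_code,
			     unsigned long address)
{
	/* Returning non-zero makes do_page_fault() return early. */
	return 0;
}

static struct pf_handler my_pfh = {
	.handler = my_fault_notifier,
};

static int __init my_pfh_init(void)
{
	register_page_fault_handler(&my_pfh);
	return 0;
}

static void __exit my_pfh_exit(void)
{
	unregister_page_fault_handler(&my_pfh);
	/* Wait for RCU readers still walking pf_handlers before my_pfh goes away. */
	synchronize_rcu();
}

module_init(my_pfh_init);
module_exit(my_pfh_exit);
MODULE_LICENSE("GPL");
```

The split between a writer spinlock and RCU readers keeps the fast path cheap: `do_page_fault()` never takes a lock, and registration/unregistration, which is rare, pays the synchronization cost.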
