about summary refs log tree commit diff stats
path: root/arch
diff options
context:
space:
mode:
author	Pekka Paalanen <pq@iki.fi>	2008-05-12 15:20:57 -0400
committer	Thomas Gleixner <tglx@linutronix.de>	2008-05-24 05:21:55 -0400
commit10c43d2eb50c9a5ad60388b9d3c41c31150049e6 (patch)
treed5a15fe0661a880366b89d0f15cb79740a7302bb /arch
parent63ffa3e456c1a9884a3ebac997d91e3fdae18d78 (diff)
x86: explicit call to mmiotrace in do_page_fault()
The custom page fault handler list is replaced with a single function pointer. All related functions and variables are renamed for mmiotrace. Signed-off-by: Pekka Paalanen <pq@iki.fi> Cc: Christoph Hellwig <hch@infradead.org> Cc: Arjan van de Ven <arjan@infradead.org> Cc: pq@iki.fi Signed-off-by: Ingo Molnar <mingo@elte.hu> Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Diffstat (limited to 'arch')
-rw-r--r--arch/x86/Kconfig.debug14
-rw-r--r--arch/x86/kernel/mmiotrace/kmmio.c14
-rw-r--r--arch/x86/mm/fault.c66
3 files changed, 47 insertions(+), 47 deletions(-)
diff --git a/arch/x86/Kconfig.debug b/arch/x86/Kconfig.debug
index 7c6496e2225e..9491c0ae03a3 100644
--- a/arch/x86/Kconfig.debug
+++ b/arch/x86/Kconfig.debug
@@ -168,20 +168,18 @@ config IOMMU_LEAK
168 Add a simple leak tracer to the IOMMU code. This is useful when you 168 Add a simple leak tracer to the IOMMU code. This is useful when you
169 are debugging a buggy device driver that leaks IOMMU mappings. 169 are debugging a buggy device driver that leaks IOMMU mappings.
170 170
171config PAGE_FAULT_HANDLERS 171config MMIOTRACE_HOOKS
172 bool "Custom page fault handlers" 172 bool
173 depends on DEBUG_KERNEL 173 default n
174 help
175 Allow the use of custom page fault handlers. A kernel module may
176 register a function that is called on every page fault. Custom
177 handlers are used by some debugging and reverse engineering tools.
178 174
179config MMIOTRACE 175config MMIOTRACE
180 tristate "Memory mapped IO tracing" 176 tristate "Memory mapped IO tracing"
181 depends on DEBUG_KERNEL && PAGE_FAULT_HANDLERS && RELAY && DEBUG_FS 177 depends on DEBUG_KERNEL && RELAY && DEBUG_FS
178 select MMIOTRACE_HOOKS
182 default n 179 default n
183 help 180 help
184 This will build a kernel module called mmiotrace. 181 This will build a kernel module called mmiotrace.
182 Making this a built-in is heavily discouraged.
185 183
186 Mmiotrace traces Memory Mapped I/O access and is meant for debugging 184 Mmiotrace traces Memory Mapped I/O access and is meant for debugging
187 and reverse engineering. The kernel module offers wrapped 185 and reverse engineering. The kernel module offers wrapped
diff --git a/arch/x86/kernel/mmiotrace/kmmio.c b/arch/x86/kernel/mmiotrace/kmmio.c
index 28411dadb8b3..e759f7c3878f 100644
--- a/arch/x86/kernel/mmiotrace/kmmio.c
+++ b/arch/x86/kernel/mmiotrace/kmmio.c
@@ -51,10 +51,6 @@ static LIST_HEAD(kmmio_probes);
51 51
52static struct kmmio_context kmmio_ctx[NR_CPUS]; 52static struct kmmio_context kmmio_ctx[NR_CPUS];
53 53
54static struct pf_handler kmmio_pf_hook = {
55 .handler = kmmio_page_fault
56};
57
58static struct notifier_block nb_die = { 54static struct notifier_block nb_die = {
59 .notifier_call = kmmio_die_notifier 55 .notifier_call = kmmio_die_notifier
60}; 56};
@@ -77,7 +73,8 @@ void cleanup_kmmio(void)
77 * kmmio_page_table, kmmio_probes 73 * kmmio_page_table, kmmio_probes
78 */ 74 */
79 if (handler_registered) { 75 if (handler_registered) {
80 unregister_page_fault_handler(&kmmio_pf_hook); 76 if (mmiotrace_unregister_pf(&kmmio_page_fault))
77 BUG();
81 synchronize_rcu(); 78 synchronize_rcu();
82 } 79 }
83 unregister_die_notifier(&nb_die); 80 unregister_die_notifier(&nb_die);
@@ -343,8 +340,11 @@ int register_kmmio_probe(struct kmmio_probe *p)
343 } 340 }
344 341
345 if (!handler_registered) { 342 if (!handler_registered) {
346 register_page_fault_handler(&kmmio_pf_hook); 343 if (mmiotrace_register_pf(&kmmio_page_fault))
347 handler_registered++; 344 printk(KERN_ERR "mmiotrace: Cannot register page "
345 "fault handler.\n");
346 else
347 handler_registered++;
348 } 348 }
349 349
350out: 350out:
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index 343f5c1aacc8..e9a086a1a9ff 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -49,53 +49,55 @@
49#define PF_RSVD (1<<3) 49#define PF_RSVD (1<<3)
50#define PF_INSTR (1<<4) 50#define PF_INSTR (1<<4)
51 51
52#ifdef CONFIG_PAGE_FAULT_HANDLERS 52#ifdef CONFIG_MMIOTRACE_HOOKS
53static HLIST_HEAD(pf_handlers); /* protected by RCU */ 53static pf_handler_func mmiotrace_pf_handler; /* protected by RCU */
54static DEFINE_SPINLOCK(pf_handlers_writer); 54static DEFINE_SPINLOCK(mmiotrace_handler_lock);
55 55
56void register_page_fault_handler(struct pf_handler *new_pfh) 56int mmiotrace_register_pf(pf_handler_func new_pfh)
57{ 57{
58 int ret = 0;
58 unsigned long flags; 59 unsigned long flags;
59 spin_lock_irqsave(&pf_handlers_writer, flags); 60 spin_lock_irqsave(&mmiotrace_handler_lock, flags);
60 hlist_add_head_rcu(&new_pfh->hlist, &pf_handlers); 61 if (mmiotrace_pf_handler)
61 spin_unlock_irqrestore(&pf_handlers_writer, flags); 62 ret = -EBUSY;
63 else
64 mmiotrace_pf_handler = new_pfh;
65 spin_unlock_irqrestore(&mmiotrace_handler_lock, flags);
66 return ret;
62} 67}
63EXPORT_SYMBOL_GPL(register_page_fault_handler); 68EXPORT_SYMBOL_GPL(mmiotrace_register_pf);
64 69
65/** 70/**
66 * unregister_page_fault_handler: 71 * mmiotrace_unregister_pf:
67 * The caller must ensure @old_pfh is not in use anymore before freeing it. 72 * The caller must ensure @old_pfh is not in use anymore before freeing it.
68 * This function does not guarantee it. The list of handlers is protected by 73 * This function does not guarantee it. The handler function pointer is
69 * RCU, so you can do this by e.g. calling synchronize_rcu(). 74 * protected by RCU, so you can do this by e.g. calling synchronize_rcu().
70 */ 75 */
71void unregister_page_fault_handler(struct pf_handler *old_pfh) 76int mmiotrace_unregister_pf(pf_handler_func old_pfh)
72{ 77{
78 int ret = 0;
73 unsigned long flags; 79 unsigned long flags;
74 spin_lock_irqsave(&pf_handlers_writer, flags); 80 spin_lock_irqsave(&mmiotrace_handler_lock, flags);
75 hlist_del_rcu(&old_pfh->hlist); 81 if (mmiotrace_pf_handler != old_pfh)
76 spin_unlock_irqrestore(&pf_handlers_writer, flags); 82 ret = -EPERM;
83 else
84 mmiotrace_pf_handler = NULL;
85 spin_unlock_irqrestore(&mmiotrace_handler_lock, flags);
86 return ret;
77} 87}
78EXPORT_SYMBOL_GPL(unregister_page_fault_handler); 88EXPORT_SYMBOL_GPL(mmiotrace_unregister_pf);
79#endif 89#endif /* CONFIG_MMIOTRACE_HOOKS */
80 90
81/* returns non-zero if do_page_fault() should return */ 91/* returns non-zero if do_page_fault() should return */
82static int handle_custom_pf(struct pt_regs *regs, unsigned long error_code, 92static inline int call_mmiotrace(struct pt_regs *regs,
83 unsigned long address) 93 unsigned long error_code,
94 unsigned long address)
84{ 95{
85#ifdef CONFIG_PAGE_FAULT_HANDLERS 96#ifdef CONFIG_MMIOTRACE_HOOKS
86 int ret = 0; 97 int ret = 0;
87 struct pf_handler *cur;
88 struct hlist_node *ncur;
89
90 if (hlist_empty(&pf_handlers))
91 return 0;
92
93 rcu_read_lock(); 98 rcu_read_lock();
94 hlist_for_each_entry_rcu(cur, ncur, &pf_handlers, hlist) { 99 if (mmiotrace_pf_handler)
95 ret = cur->handler(regs, error_code, address); 100 ret = mmiotrace_pf_handler(regs, error_code, address);
96 if (ret)
97 break;
98 }
99 rcu_read_unlock(); 101 rcu_read_unlock();
100 return ret; 102 return ret;
101#else 103#else
@@ -655,7 +657,7 @@ void __kprobes do_page_fault(struct pt_regs *regs, unsigned long error_code)
655 657
656 if (notify_page_fault(regs)) 658 if (notify_page_fault(regs))
657 return; 659 return;
658 if (handle_custom_pf(regs, error_code, address)) 660 if (call_mmiotrace(regs, error_code, address))
659 return; 661 return;
660 662
661 /* 663 /*