author	Pekka Paalanen <pq@iki.fi>	2008-05-12 15:20:56 -0400
committer	Thomas Gleixner <tglx@linutronix.de>	2008-05-24 05:21:14 -0400
commit	8b7d89d02ef3c6a7c73d6596f28cea7632850af4 (patch)
tree	32601bf4f34dd9e3ec1e9610c555e10dc448006c /arch/x86/kernel/mmiotrace/kmmio.c
parent	677aa9f77e8de3791b481a0cec6c8b84d1eec626 (diff)
x86: mmiotrace - trace memory mapped IO
Mmiotrace is a tool for trapping memory mapped IO (MMIO) accesses within the kernel. It is used for debugging and especially for reverse engineering evil binary drivers.

Mmiotrace works by wrapping the ioremap family of kernel functions and marking the returned pages as not present. Access to the IO memory triggers a page fault, which is handled by mmiotrace's custom page fault handler. The handler single-steps the faulting instruction with the MMIO page marked as present. Access logs are directed to user space via relay and debugfs.

This page fault approach is necessary because binary drivers have readl/writel etc. calls inlined, which makes them extremely difficult to trap with e.g. kprobes.

This patch depends on the custom page fault handlers patch.

Signed-off-by: Pekka Paalanen <pq@iki.fi>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
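(For orientation, here is a minimal sketch of how a client module might hook an MMIO range with the API this file introduces. The kmmio_probe fields and handler signatures are inferred from how kmmio.c uses them; kmmio.h itself is not shown in this diff, and the BAR address is purely hypothetical.)

#include <linux/module.h>
#include <linux/ptrace.h>
#include <asm/io.h>

#include "kmmio.h"

static void demo_pre(struct kmmio_probe *p, struct pt_regs *regs,
				unsigned long addr)
{
	/* Runs before the faulting MMIO instruction is single-stepped. */
	printk(KERN_INFO "mmiotrace demo: MMIO access at 0x%lx\n", addr);
}

static void demo_post(struct kmmio_probe *p, unsigned long condition,
				struct pt_regs *regs)
{
	/* Runs from the debug trap, after the instruction has executed. */
}

static struct kmmio_probe demo_probe = {
	.len		= PAGE_SIZE,
	.pre_handler	= demo_pre,
	.post_handler	= demo_post,
};

static int __init demo_init(void)
{
	/* 0xfd000000 stands in for a real device BAR; illustration only. */
	void __iomem *io = ioremap(0xfd000000, PAGE_SIZE);
	if (!io)
		return -ENOMEM;
	demo_probe.addr = (unsigned long)io;
	return register_kmmio_probe(&demo_probe);
}

static void __exit demo_exit(void)
{
	unregister_kmmio_probe(&demo_probe);
	iounmap((void __iomem *)demo_probe.addr);
}

module_init(demo_init);
module_exit(demo_exit);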
Diffstat (limited to 'arch/x86/kernel/mmiotrace/kmmio.c')
-rw-r--r--	arch/x86/kernel/mmiotrace/kmmio.c	391
1 file changed, 391 insertions(+), 0 deletions(-)
diff --git a/arch/x86/kernel/mmiotrace/kmmio.c b/arch/x86/kernel/mmiotrace/kmmio.c
new file mode 100644
index 000000000000..8ba48f9c91b4
--- /dev/null
+++ b/arch/x86/kernel/mmiotrace/kmmio.c
@@ -0,0 +1,391 @@
/* Support for MMIO probes.
 * Borrows much code from kprobes.
 * (C) 2002 Louis Zhuang <louis.zhuang@intel.com>.
 * 2007 Alexander Eichner
 * 2008 Pekka Paalanen <pq@iki.fi>
 */

#include <linux/version.h>
#include <linux/spinlock.h>
#include <linux/hash.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/uaccess.h>
#include <linux/ptrace.h>
#include <linux/preempt.h>
#include <asm/io.h>
#include <asm/cacheflush.h>
#include <asm/errno.h>
#include <asm/tlbflush.h>

#include "kmmio.h"

#define KMMIO_HASH_BITS 6
#define KMMIO_TABLE_SIZE (1 << KMMIO_HASH_BITS)
#define KMMIO_PAGE_HASH_BITS 4
#define KMMIO_PAGE_TABLE_SIZE (1 << KMMIO_PAGE_HASH_BITS)

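/*
 * Per-CPU bookkeeping for a fault that is currently being single-stepped.
 * Only one MMIO access can be traced at a time on each CPU.
 */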
struct kmmio_context {
	struct kmmio_fault_page *fpage;
	struct kmmio_probe *probe;
	unsigned long saved_flags;
	int active;
};

static int kmmio_page_fault(struct pt_regs *regs, unsigned long error_code,
						unsigned long address);
static int kmmio_die_notifier(struct notifier_block *nb, unsigned long val,
						void *args);

static DEFINE_SPINLOCK(kmmio_lock);

/* These are protected by kmmio_lock */
unsigned int kmmio_count;
static unsigned int handler_registered;
static struct list_head kmmio_page_table[KMMIO_PAGE_TABLE_SIZE];
static LIST_HEAD(kmmio_probes);

static struct kmmio_context kmmio_ctx[NR_CPUS];

static struct pf_handler kmmio_pf_hook = {
	.handler = kmmio_page_fault
};

static struct notifier_block nb_die = {
	.notifier_call = kmmio_die_notifier
};

int init_kmmio(void)
{
	int i;
	for (i = 0; i < KMMIO_PAGE_TABLE_SIZE; i++)
		INIT_LIST_HEAD(&kmmio_page_table[i]);

	register_die_notifier(&nb_die);
	return 0;
}

void cleanup_kmmio(void)
{
	/*
	 * Assume the following have already been cleaned up by calling
	 * unregister_kmmio_probe() appropriately:
	 * kmmio_page_table, kmmio_probes
	 */
	if (handler_registered) {
		unregister_page_fault_handler(&kmmio_pf_hook);
		synchronize_rcu();
	}
	unregister_die_notifier(&nb_die);
}

/*
 * This is basically a dynamic stabbing problem: we could use the existing
 * prio tree code, or one of the possibly better alternatives:
 * "The Interval Skip List: A Data Structure for Finding All Intervals That
 * Overlap a Point" (might be simple), or
 * "Space Efficient Dynamic Stabbing with Fast Queries" by Mikkel Thorup.
 */
/* Get the kmmio at this addr (if any). You must be holding kmmio_lock. */
static struct kmmio_probe *get_kmmio_probe(unsigned long addr)
{
	struct kmmio_probe *p;
	list_for_each_entry(p, &kmmio_probes, list) {
		if (addr >= p->addr && addr <= (p->addr + p->len))
			return p;
	}
	return NULL;
}

static struct kmmio_fault_page *get_kmmio_fault_page(unsigned long page)
{
	struct list_head *head, *tmp;

	page &= PAGE_MASK;
	head = &kmmio_page_table[hash_long(page, KMMIO_PAGE_HASH_BITS)];
	list_for_each(tmp, head) {
		struct kmmio_fault_page *p
			= list_entry(tmp, struct kmmio_fault_page, list);
		if (p->page == page)
			return p;
	}

	return NULL;
}

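/*
 * Arming clears _PAGE_PRESENT on the page (or on its 2M/4M large-page
 * mapping), so the next access faults into kmmio_page_fault(). Disarming
 * restores the bit so the faulting instruction can be single-stepped
 * against the real hardware mapping.
 */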
static void arm_kmmio_fault_page(unsigned long page, int *large)
{
	unsigned long address = page & PAGE_MASK;
	pgd_t *pgd = pgd_offset_k(address);
	pud_t *pud = pud_offset(pgd, address);
	pmd_t *pmd = pmd_offset(pud, address);
	pte_t *pte = pte_offset_kernel(pmd, address);

	if (pmd_large(*pmd)) {
		set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_PRESENT));
		if (large)
			*large = 1;
	} else {
		set_pte(pte, __pte(pte_val(*pte) & ~_PAGE_PRESENT));
	}

	__flush_tlb_one(page);
}

static void disarm_kmmio_fault_page(unsigned long page, int *large)
{
	unsigned long address = page & PAGE_MASK;
	pgd_t *pgd = pgd_offset_k(address);
	pud_t *pud = pud_offset(pgd, address);
	pmd_t *pmd = pmd_offset(pud, address);
	pte_t *pte = pte_offset_kernel(pmd, address);

	if (large && *large) {
		set_pmd(pmd, __pmd(pmd_val(*pmd) | _PAGE_PRESENT));
		*large = 0;
	} else {
		set_pte(pte, __pte(pte_val(*pte) | _PAGE_PRESENT));
	}

	__flush_tlb_one(page);
}

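/*
 * The round trip for one traced access:
 *  1. the access faults because the page is armed (not present),
 *  2. kmmio_handler() runs the pre_handler, disarms the page and sets TF,
 *  3. the instruction is re-executed against the real mapping,
 *  4. the debug trap fires and post_kmmio_handler() re-arms the page.
 */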
/*
 * Interrupts are disabled on entry, as we come here through an
 * interrupt gate, and they remain disabled throughout this function.
 */
static int kmmio_handler(struct pt_regs *regs, unsigned long addr)
{
	struct kmmio_context *ctx;
	int cpu;

	/*
	 * Preemption is now disabled to prevent process switch during
	 * single stepping. We can only handle one active kmmio trace
	 * per cpu, so ensure that we finish it before something else
	 * gets to run.
	 *
	 * XXX what if an interrupt occurs between returning from
	 * do_page_fault() and entering the single-step exception handler?
	 * And that interrupt triggers a kmmio trap?
	 */
	preempt_disable();
	cpu = smp_processor_id();
	ctx = &kmmio_ctx[cpu];

	/* interrupts disabled and CPU-local data => atomicity guaranteed. */
	if (ctx->active) {
		/*
		 * This avoids a deadlock with kmmio_lock.
		 * If this page fault really was due to a kmmio trap,
		 * all hell breaks loose.
		 */
		printk(KERN_EMERG "mmiotrace: recursive probe hit on CPU %d, "
					"for address 0x%lx. Ignoring.\n",
					cpu, addr);
		goto no_kmmio;
	}
	ctx->active++;

	/*
	 * Acquire the kmmio lock to prevent changes affecting
	 * get_kmmio_fault_page() and get_kmmio_probe(), since we save their
	 * returned pointers.
	 * The lock is released in post_kmmio_handler().
	 * XXX: could/should get_kmmio_*() be using RCU instead of spinlock?
	 */
	spin_lock(&kmmio_lock);

	ctx->fpage = get_kmmio_fault_page(addr);
	if (!ctx->fpage) {
		/* this page fault was not caused by kmmio */
		goto no_kmmio_locked;
	}

	ctx->probe = get_kmmio_probe(addr);
	ctx->saved_flags = (regs->flags & (TF_MASK|IF_MASK));

	if (ctx->probe && ctx->probe->pre_handler)
		ctx->probe->pre_handler(ctx->probe, regs, addr);

	regs->flags |= TF_MASK;
	regs->flags &= ~IF_MASK;

	/* We hold the lock; now set the present bit in the PTE and single-step. */
	disarm_kmmio_fault_page(ctx->fpage->page, NULL);

	return 1;

no_kmmio_locked:
	spin_unlock(&kmmio_lock);
	ctx->active--;
no_kmmio:
	preempt_enable_no_resched();
	/* page fault not handled by kmmio */
	return 0;
}

/*
 * Interrupts are disabled on entry as trap1 (the debug trap) is an
 * interrupt gate and they remain disabled throughout this function.
 * We also hold the kmmio lock.
 */
static int post_kmmio_handler(unsigned long condition, struct pt_regs *regs)
{
	int cpu = smp_processor_id();
	struct kmmio_context *ctx = &kmmio_ctx[cpu];

	if (!ctx->active)
		return 0;

	if (ctx->probe && ctx->probe->post_handler)
		ctx->probe->post_handler(ctx->probe, condition, regs);

	arm_kmmio_fault_page(ctx->fpage->page, NULL);

	regs->flags &= ~TF_MASK;
	regs->flags |= ctx->saved_flags;

	/* These were acquired in kmmio_handler(). */
	ctx->active--;
	spin_unlock(&kmmio_lock);
	preempt_enable_no_resched();

	/*
	 * If somebody else is single-stepping across a probe point, flags
	 * will have TF set, in which case continue the remaining processing
	 * of do_debug, as if this were not a probe hit.
	 */
	if (regs->flags & TF_MASK)
		return 0;

	return 1;
}

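/*
 * Fault pages are reference counted, so overlapping probes can share a
 * page: it is armed on the first reference and disarmed on the last.
 */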
static int add_kmmio_fault_page(unsigned long page)
{
	struct kmmio_fault_page *f;

	page &= PAGE_MASK;
	f = get_kmmio_fault_page(page);
	if (f) {
		f->count++;
		return 0;
	}

	f = kmalloc(sizeof(*f), GFP_ATOMIC);
	if (!f)
		return -ENOMEM;

	f->count = 1;
	f->page = page;
	list_add(&f->list,
		&kmmio_page_table[hash_long(f->page, KMMIO_PAGE_HASH_BITS)]);

	arm_kmmio_fault_page(f->page, NULL);

	return 0;
}

static void release_kmmio_fault_page(unsigned long page)
{
	struct kmmio_fault_page *f;

	page &= PAGE_MASK;
	f = get_kmmio_fault_page(page);
	if (!f)
		return;

	f->count--;
	if (!f->count) {
		disarm_kmmio_fault_page(f->page, NULL);
		list_del(&f->list);
		/* No references remain; free the entry allocated in
		 * add_kmmio_fault_page(). */
		kfree(f);
	}
}

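/*
 * Registration arms every page touched by the probed range and, on the
 * first registration, installs the global page fault hook.
 */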
int register_kmmio_probe(struct kmmio_probe *p)
{
	int ret = 0;
	unsigned long size = 0;

	spin_lock_irq(&kmmio_lock);
	kmmio_count++;
	if (get_kmmio_probe(p->addr)) {
		ret = -EEXIST;
		goto out;
	}
	list_add(&p->list, &kmmio_probes);
	while (size < p->len) {
		if (add_kmmio_fault_page(p->addr + size))
			printk(KERN_ERR "mmiotrace: Unable to set page fault.\n");
		size += PAGE_SIZE;
	}

	if (!handler_registered) {
		register_page_fault_handler(&kmmio_pf_hook);
		handler_registered++;
	}

out:
	spin_unlock_irq(&kmmio_lock);
	/*
	 * XXX: What should I do here?
	 * Here was a call to global_flush_tlb(), but it does not exist
	 * anymore.
	 */
	return ret;
}

void unregister_kmmio_probe(struct kmmio_probe *p)
{
	unsigned long size = 0;

	spin_lock_irq(&kmmio_lock);
	while (size < p->len) {
		release_kmmio_fault_page(p->addr + size);
		size += PAGE_SIZE;
	}
	list_del(&p->list);
	kmmio_count--;
	spin_unlock_irq(&kmmio_lock);
}

/*
 * According to 2.6.20, mainly x86_64 arch:
 * This is being called from do_page_fault(), via the page fault notifier
 * chain. The chain is called for both user space faults and kernel space
 * faults (address >= TASK_SIZE64), except not on faults serviced by
 * vmalloc_fault().
 *
 * We may be in an interrupt or a critical section. Also prefetching may
 * trigger a page fault. We may be in the middle of a process switch.
 * The page fault hook functionality has put us inside an RCU read lock.
 *
 * Local interrupts are disabled, so preemption cannot happen.
 * Do not enable interrupts, do not sleep, and watch out for other CPUs.
 */
static int kmmio_page_fault(struct pt_regs *regs, unsigned long error_code,
						unsigned long address)
{
	if (is_kmmio_active())
		if (kmmio_handler(regs, address) == 1)
			return -1;
	return 0;
}

static int kmmio_die_notifier(struct notifier_block *nb, unsigned long val,
						void *args)
{
	struct die_args *arg = args;

	if (val == DIE_DEBUG)
		if (post_kmmio_handler(arg->err, arg->regs) == 1)
			return NOTIFY_STOP;

	return NOTIFY_DONE;
}