path: root/arch/x86/kernel/mmiotrace/kmmio.c
author		Pekka Paalanen <pq@iki.fi>		2008-05-12 15:20:57 -0400
committer	Thomas Gleixner <tglx@linutronix.de>	2008-05-24 05:22:24 -0400
commit		d61fc44853f46fb002228b18aa5f30db21fcd4ac (patch)
tree		14fa9416aeceb7c5d24876c1111f6f2458a1dc7d /arch/x86/kernel/mmiotrace/kmmio.c
parent		0fd0e3da4557c479b820b9a4a7afa25b4637ddf2 (diff)
x86: mmiotrace, preview 2
Kconfig.debug, Makefile and testmmiotrace.c style fixes.
Use a real mutex instead of a semaphore-based one.
Fix failure path in register probe func.
kmmio: RCU read-locked over single stepping.
Generate mapping id's.
Make mmio-mod.c built-in and rewrite its locking.
Add debugfs file to enable/disable mmiotracing.
kmmio: use irqsave spinlocks.
Lots of cleanups in mmio-mod.c.
Marker file moved from /proc into debugfs.
Call mmiotrace entrypoints directly from ioremap.c.

Signed-off-by: Pekka Paalanen <pq@iki.fi>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Diffstat (limited to 'arch/x86/kernel/mmiotrace/kmmio.c')
-rw-r--r--	arch/x86/kernel/mmiotrace/kmmio.c	72
1 file changed, 32 insertions(+), 40 deletions(-)
diff --git a/arch/x86/kernel/mmiotrace/kmmio.c b/arch/x86/kernel/mmiotrace/kmmio.c
index 539a9b19588f..efb467933087 100644
--- a/arch/x86/kernel/mmiotrace/kmmio.c
+++ b/arch/x86/kernel/mmiotrace/kmmio.c
@@ -19,6 +19,7 @@
 #include <linux/preempt.h>
 #include <linux/percpu.h>
 #include <linux/kdebug.h>
+#include <linux/mutex.h>
 #include <asm/io.h>
 #include <asm/cacheflush.h>
 #include <asm/errno.h>
@@ -59,7 +60,7 @@ struct kmmio_context {
 static int kmmio_die_notifier(struct notifier_block *nb, unsigned long val,
 		void *args);
 
-static DECLARE_MUTEX(kmmio_init_mutex);
+static DEFINE_MUTEX(kmmio_init_mutex);
 static DEFINE_SPINLOCK(kmmio_lock);
 
 /* These are protected by kmmio_lock */
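The DECLARE_MUTEX() replaced above, despite its name, declared a semaphore pressed into mutex service; DEFINE_MUTEX() defines a real struct mutex with strict owner semantics and debug checking. A minimal sketch of the two idioms, with hypothetical example locks that are not part of this file:

	#include <linux/mutex.h>
	#include <linux/semaphore.h>	/* <asm/semaphore.h> on older trees */

	static DECLARE_MUTEX(example_sem);	/* old idiom: really a semaphore */
	static DEFINE_MUTEX(example_mutex);	/* new idiom: a real struct mutex */

	static void example_critical_sections(void)
	{
		down(&example_sem);		/* semaphore API; any task may up() */
		/* ... */
		up(&example_sem);

		mutex_lock(&example_mutex);	/* mutex API; only the owner unlocks */
		/* ... */
		mutex_unlock(&example_mutex);
	}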
@@ -90,7 +91,7 @@ static struct notifier_block nb_die = {
  */
 void reference_kmmio(void)
 {
-	down(&kmmio_init_mutex);
+	mutex_lock(&kmmio_init_mutex);
 	spin_lock_irq(&kmmio_lock);
 	if (!kmmio_initialized) {
 		int i;
@@ -101,7 +102,7 @@ void reference_kmmio(void)
 	}
 	kmmio_initialized++;
 	spin_unlock_irq(&kmmio_lock);
-	up(&kmmio_init_mutex);
+	mutex_unlock(&kmmio_init_mutex);
 }
 EXPORT_SYMBOL_GPL(reference_kmmio);
 
@@ -115,7 +116,7 @@ void unreference_kmmio(void)
 {
 	bool unreg = false;
 
-	down(&kmmio_init_mutex);
+	mutex_lock(&kmmio_init_mutex);
 	spin_lock_irq(&kmmio_lock);
 
 	if (kmmio_initialized == 1) {
@@ -128,7 +129,7 @@ void unreference_kmmio(void)
 
 	if (unreg)
 		unregister_die_notifier(&nb_die); /* calls sync_rcu() */
-	up(&kmmio_init_mutex);
+	mutex_unlock(&kmmio_init_mutex);
 }
 EXPORT_SYMBOL(unreference_kmmio);
 
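Taken together, reference_kmmio() and unreference_kmmio() implement refcount-guarded setup and teardown: the first user hooks the die notifier, the last one unhooks it. A condensed sketch of the teardown half, assembled from the hunks above rather than quoted verbatim; note that the notifier is unhooked only after the spinlock is dropped, since unregister_die_notifier() synchronizes RCU and may sleep, which is why the sleepable mutex guards the whole sequence:

	void unreference_kmmio(void)
	{
		bool unreg = false;

		mutex_lock(&kmmio_init_mutex);	/* serializes setup/teardown */
		spin_lock_irq(&kmmio_lock);
		if (kmmio_initialized == 1)
			unreg = true;		/* last user is going away */
		kmmio_initialized--;
		spin_unlock_irq(&kmmio_lock);

		if (unreg)
			unregister_die_notifier(&nb_die); /* calls sync_rcu() */
		mutex_unlock(&kmmio_init_mutex);
	}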
@@ -244,17 +245,13 @@ int kmmio_handler(struct pt_regs *regs, unsigned long addr)
 	 * Preemption is now disabled to prevent process switch during
 	 * single stepping. We can only handle one active kmmio trace
 	 * per cpu, so ensure that we finish it before something else
-	 * gets to run.
-	 *
-	 * XXX what if an interrupt occurs between returning from
-	 * do_page_fault() and entering the single-step exception handler?
-	 * And that interrupt triggers a kmmio trap?
-	 * XXX If we tracing an interrupt service routine or whatever, is
-	 * this enough to keep it on the current cpu?
+	 * gets to run. We also hold the RCU read lock over single
+	 * stepping to avoid looking up the probe and kmmio_fault_page
+	 * again.
 	 */
 	preempt_disable();
-
 	rcu_read_lock();
+
 	faultpage = get_kmmio_fault_page(addr);
 	if (!faultpage) {
 		/*
@@ -287,14 +284,24 @@ int kmmio_handler(struct pt_regs *regs, unsigned long addr)
 	if (ctx->probe && ctx->probe->pre_handler)
 		ctx->probe->pre_handler(ctx->probe, regs, addr);
 
+	/*
+	 * Enable single-stepping and disable interrupts for the faulting
+	 * context. Local interrupts must not get enabled during stepping.
+	 */
 	regs->flags |= TF_MASK;
 	regs->flags &= ~IF_MASK;
 
 	/* Now we set present bit in PTE and single step. */
 	disarm_kmmio_fault_page(ctx->fpage->page, NULL);
 
+	/*
+	 * If another cpu accesses the same page while we are stepping,
+	 * the access will not be caught. It will simply succeed and the
+	 * only downside is we lose the event. If this becomes a problem,
+	 * the user should drop to single cpu before tracing.
+	 */
+
 	put_cpu_var(kmmio_ctx);
-	rcu_read_unlock();
 	return 1;
 
 no_kmmio_ctx:
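The net effect of the two hunks above is that a single RCU read-side critical section now covers the whole fault/single-step round trip, so the post handler no longer needs to look anything up. Roughly, using the handler names shown in this diff:

	/*
	 * Sketch of the trap round trip after this change (not verbatim code):
	 *
	 * 1. An access hits an armed page (present bit clear) and faults.
	 * 2. kmmio_handler(): preempt_disable(); rcu_read_lock();
	 *    look up the fault page and probe, run pre_handler(),
	 *    set TF, clear IF, disarm the page, return to retry the access.
	 * 3. The access succeeds and the debug trap fires immediately.
	 * 4. post_kmmio_handler(): run post_handler(), re-arm the page,
	 *    restore flags, rcu_read_unlock(), preempt_enable_no_resched().
	 */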
@@ -313,32 +320,15 @@ no_kmmio:
 static int post_kmmio_handler(unsigned long condition, struct pt_regs *regs)
 {
 	int ret = 0;
-	struct kmmio_probe *probe;
-	struct kmmio_fault_page *faultpage;
 	struct kmmio_context *ctx = &get_cpu_var(kmmio_ctx);
 
 	if (!ctx->active)
 		goto out;
 
-	rcu_read_lock();
-
-	faultpage = get_kmmio_fault_page(ctx->addr);
-	probe = get_kmmio_probe(ctx->addr);
-	if (faultpage != ctx->fpage || probe != ctx->probe) {
-		/*
-		 * The trace setup changed after kmmio_handler() and before
-		 * running this respective post handler. User does not want
-		 * the result anymore.
-		 */
-		ctx->probe = NULL;
-		ctx->fpage = NULL;
-	}
-
 	if (ctx->probe && ctx->probe->post_handler)
 		ctx->probe->post_handler(ctx->probe, condition, regs);
 
-	if (ctx->fpage)
-		arm_kmmio_fault_page(ctx->fpage->page, NULL);
+	arm_kmmio_fault_page(ctx->fpage->page, NULL);
 
 	regs->flags &= ~TF_MASK;
 	regs->flags |= ctx->saved_flags;
@@ -346,6 +336,7 @@ static int post_kmmio_handler(unsigned long condition, struct pt_regs *regs)
 	/* These were acquired in kmmio_handler(). */
 	ctx->active--;
 	BUG_ON(ctx->active);
+	rcu_read_unlock();
 	preempt_enable_no_resched();
 
 	/*
@@ -355,8 +346,6 @@ static int post_kmmio_handler(unsigned long condition, struct pt_regs *regs)
 	 */
 	if (!(regs->flags & TF_MASK))
 		ret = 1;
-
-	rcu_read_unlock();
 out:
 	put_cpu_var(kmmio_ctx);
 	return ret;
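With the re-validation gone, the post handler trusts the per-cpu context captured in kmmio_handler(); the RCU read lock held since then guarantees the probe and fault page cannot have been freed in between. The acquisitions now release in strict reverse order across the two handlers:

	preempt_disable();		/* kmmio_handler() entry */
	rcu_read_lock();		/* kmmio_handler() entry */
	/* ... the faulting instruction single-steps here ... */
	rcu_read_unlock();		/* post_kmmio_handler() exit */
	preempt_enable_no_resched();	/* post_kmmio_handler() exit */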
@@ -411,15 +400,16 @@ static void release_kmmio_fault_page(unsigned long page,
 
 int register_kmmio_probe(struct kmmio_probe *p)
 {
+	unsigned long flags;
 	int ret = 0;
 	unsigned long size = 0;
 
-	spin_lock_irq(&kmmio_lock);
-	kmmio_count++;
+	spin_lock_irqsave(&kmmio_lock, flags);
 	if (get_kmmio_probe(p->addr)) {
 		ret = -EEXIST;
 		goto out;
 	}
+	kmmio_count++;
 	list_add_rcu(&p->list, &kmmio_probes);
 	while (size < p->len) {
 		if (add_kmmio_fault_page(p->addr + size))
@@ -427,7 +417,7 @@ int register_kmmio_probe(struct kmmio_probe *p)
 		size += PAGE_SIZE;
 	}
 out:
-	spin_unlock_irq(&kmmio_lock);
+	spin_unlock_irqrestore(&kmmio_lock, flags);
 	/*
 	 * XXX: What should I do here?
 	 * Here was a call to global_flush_tlb(), but it does not exist
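The switch to the irqsave variants matters because spin_unlock_irq() unconditionally re-enables interrupts, which corrupts state when the caller already had them disabled. The irqsave form records the previous interrupt state and restores exactly that; the general pattern:

	unsigned long flags;

	spin_lock_irqsave(&kmmio_lock, flags);	/* save IRQ state, then disable */
	/* ... modify kmmio_probes and the fault page table ... */
	spin_unlock_irqrestore(&kmmio_lock, flags); /* restore the saved state */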
@@ -478,7 +468,8 @@ static void remove_kmmio_fault_pages(struct rcu_head *head)
 
 /*
  * Remove a kmmio probe. You have to synchronize_rcu() before you can be
- * sure that the callbacks will not be called anymore.
+ * sure that the callbacks will not be called anymore. Only after that
+ * you may actually release your struct kmmio_probe.
  *
  * Unregistering a kmmio fault page has three steps:
  * 1. release_kmmio_fault_page()
@@ -490,18 +481,19 @@ static void remove_kmmio_fault_pages(struct rcu_head *head)
  */
 void unregister_kmmio_probe(struct kmmio_probe *p)
 {
+	unsigned long flags;
 	unsigned long size = 0;
 	struct kmmio_fault_page *release_list = NULL;
 	struct kmmio_delayed_release *drelease;
 
-	spin_lock_irq(&kmmio_lock);
+	spin_lock_irqsave(&kmmio_lock, flags);
 	while (size < p->len) {
 		release_kmmio_fault_page(p->addr + size, &release_list);
 		size += PAGE_SIZE;
 	}
 	list_del_rcu(&p->list);
 	kmmio_count--;
-	spin_unlock_irq(&kmmio_lock);
+	spin_unlock_irqrestore(&kmmio_lock, flags);
 
 	drelease = kmalloc(sizeof(*drelease), GFP_ATOMIC);
 	if (!drelease) {
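A caller honoring the tear-down contract documented above might look like this; my_probe and my_teardown() are hypothetical names, not part of this patch:

	static struct kmmio_probe my_probe;	/* hypothetical caller-owned probe */

	static void my_teardown(void)
	{
		unregister_kmmio_probe(&my_probe);
		synchronize_rcu();	/* wait out all RCU read-side sections */
		/* only now may my_probe be freed or reused */
	}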