author:    Keshavamurthy Anil S <anil.s.keshavamurthy@intel.com>  (2006-01-11 15:17:41 -0500)
committer: Linus Torvalds <torvalds@g5.osdl.org>  (2006-01-11 21:42:12 -0500)
commit:    df019b1d8b893d0f0ee5a9b0f71486f0892561ae
tree:      9d2ced14291502af1ca687b5d854d8394cbfb84d /kernel
parent:    ef43bc4fc32bec8fda7bae8948b774616dc9e496
[PATCH] kprobes: fix unloading of self probed module
When a kprobes module is written in such a way that probes are inserted on
itself, unloading that module was not possible due to reference counting on
the same module.

The patch below adds a check and increments the module refcount only if it
is not a self-probed module.

We need to allow modules to probe themselves for kprobes performance
measurements.

This patch has been tested on the x86_64, ppc64 and IA64 architectures.
Signed-off-by: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
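
To make the use case concrete, here is a minimal sketch (not part of this patch) of the kind of self-probing module the message describes: it places a kprobe on one of its own functions to count how often it runs. All names (`my_traced_func`, `selfprobe_init`, and so on) are hypothetical, and the probe address is set directly through `kp.addr`, as was usual for the kprobes API of this era. Before this change, `register_kprobe()` would take a reference on the module itself, so `rmmod` could never succeed; with it, the self-probe is detected and the module can still be unloaded.

```c
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/kprobes.h>

static int hits;

/* noinline so the probed address stays a real function entry */
static noinline int my_traced_func(int x)
{
	return x + 1;
}

/* Count every hit on my_traced_func() */
static int selfprobe_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
	hits++;
	return 0;
}

static struct kprobe kp = {
	.pre_handler = selfprobe_pre_handler,
};

static int __init selfprobe_init(void)
{
	kp.addr = (kprobe_opcode_t *)my_traced_func;
	/* With this patch, register_kprobe() notices that the probed
	 * address lives in the registering module itself and does not
	 * pin our refcount, so the module remains unloadable via rmmod. */
	return register_kprobe(&kp);
}

static void __exit selfprobe_exit(void)
{
	unregister_kprobe(&kp);
	printk(KERN_INFO "my_traced_func hit %d times\n", hits);
}

module_init(selfprobe_init);
module_exit(selfprobe_exit);
MODULE_LICENSE("GPL");
```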
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/kprobes.c | 42
1 file changed, 32 insertions, 10 deletions
```diff
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index 34a885bb82e0..3ea6325228da 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -449,19 +449,32 @@ static int __kprobes in_kprobes_functions(unsigned long addr)
 	return 0;
 }
 
-int __kprobes register_kprobe(struct kprobe *p)
+static int __kprobes __register_kprobe(struct kprobe *p,
+	unsigned long called_from)
 {
 	int ret = 0;
 	struct kprobe *old_p;
-	struct module *mod;
+	struct module *probed_mod;
 
 	if ((!kernel_text_address((unsigned long) p->addr)) ||
 		in_kprobes_functions((unsigned long) p->addr))
 		return -EINVAL;
 
-	if ((mod = module_text_address((unsigned long) p->addr)) &&
-	    (unlikely(!try_module_get(mod))))
-		return -EINVAL;
+	p->mod_refcounted = 0;
+	/* Check are we probing a module */
+	if ((probed_mod = module_text_address((unsigned long) p->addr))) {
+		struct module *calling_mod = module_text_address(called_from);
+		/* We must allow modules to probe themself and
+		 * in this case avoid incrementing the module refcount,
+		 * so as to allow unloading of self probing modules.
+		 */
+		if (calling_mod && (calling_mod != probed_mod)) {
+			if (unlikely(!try_module_get(probed_mod)))
+				return -EINVAL;
+			p->mod_refcounted = 1;
+		} else
+			probed_mod = NULL;
+	}
 
 	p->nmissed = 0;
 	down(&kprobe_mutex);
@@ -483,11 +496,17 @@ int __kprobes register_kprobe(struct kprobe *p)
 out:
 	up(&kprobe_mutex);
 
-	if (ret && mod)
-		module_put(mod);
+	if (ret && probed_mod)
+		module_put(probed_mod);
 	return ret;
 }
 
+int __kprobes register_kprobe(struct kprobe *p)
+{
+	return __register_kprobe(p,
+		(unsigned long)__builtin_return_address(0));
+}
+
 void __kprobes unregister_kprobe(struct kprobe *p)
 {
 	struct module *mod;
@@ -524,7 +543,8 @@ valid_p:
 	up(&kprobe_mutex);
 
 	synchronize_sched();
-	if ((mod = module_text_address((unsigned long)p->addr)))
+	if (p->mod_refcounted &&
+	    (mod = module_text_address((unsigned long)p->addr)))
 		module_put(mod);
 
 	if (cleanup_p) {
@@ -547,7 +567,8 @@ int __kprobes register_jprobe(struct jprobe *jp)
 	jp->kp.pre_handler = setjmp_pre_handler;
 	jp->kp.break_handler = longjmp_break_handler;
 
-	return register_kprobe(&jp->kp);
+	return __register_kprobe(&jp->kp,
+		(unsigned long)__builtin_return_address(0));
 }
 
 void __kprobes unregister_jprobe(struct jprobe *jp)
@@ -587,7 +608,8 @@ int __kprobes register_kretprobe(struct kretprobe *rp)
 
 	rp->nmissed = 0;
 	/* Establish function entry probe point */
-	if ((ret = register_kprobe(&rp->kp)) != 0)
+	if ((ret = __register_kprobe(&rp->kp,
+		(unsigned long)__builtin_return_address(0))) != 0)
 		free_rp_inst(rp);
 	return ret;
 }
```
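
The self-probe detection hinges on resolving the caller's return address to its owning module: `register_kprobe()`, `register_jprobe()` and `register_kretprobe()` now pass `__builtin_return_address(0)` into `__register_kprobe()`, and `module_text_address()` maps both that address and the probed address to modules. A condensed sketch of that decision follows; the helper name is hypothetical, the real logic lives inline in `__register_kprobe()` above.

```c
/* Condensed sketch (hypothetical helper) of the refcount decision made
 * by __register_kprobe() after this patch. */
static int should_ref_probed_module(unsigned long probe_addr,
				    unsigned long called_from)
{
	struct module *probed_mod  = module_text_address(probe_addr);
	struct module *calling_mod = module_text_address(called_from);

	/* Probing core kernel text: there is no module to pin. */
	if (!probed_mod)
		return 0;

	/* Take a reference only when the probe is registered from a
	 * different module; a self-probe (and, as in the patch, a
	 * registration from built-in code, where calling_mod is NULL)
	 * leaves the refcount alone so the probed module can still be
	 * unloaded. */
	return calling_mod && calling_mod != probed_mod;
}
```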