author     Christoph Hellwig <hch@lst.de>                        2007-05-08 03:34:14 -0400
committer  Linus Torvalds <torvalds@woody.linux-foundation.org>  2007-05-08 14:15:19 -0400
commit     4c4308cb93450989846ac49faeb6dab943e7657e (patch)
tree       c06092cae6f95a243cdd758d07491cf5fa24a1dd /kernel
parent     6f716acd5fa20ae6a35ab29ae37fa9189e839ed5 (diff)
kprobes: kretprobes simplifications
- consolidate duplicate code in all arch_prepare_kretprobe instances
  into common code
- replace various odd helpers that use hlist_for_each_entry to get
  the first element of a list with either a hlist_for_each_entry_safe
  loop or an opencoded access to the first element in the caller
  (see the sketch after this list)
- inline add_rp_inst into its only remaining caller
- use kretprobe_inst_table_head instead of opencoding it
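
The before/after pattern for the second bullet, condensed into a minimal
sketch (an illustration distilled from the hunks below, not a literal
quote of the patch), using free_rp_inst() as the example caller:

        /* before: a helper abused hlist_for_each_entry() just to return
         * the first element, and the caller looped on that helper */
        struct kretprobe_instance *ri;

        while ((ri = get_free_rp_inst(rp)) != NULL) {
                hlist_del(&ri->uflist);
                kfree(ri);
        }

        /* after: walk the list directly with the _safe variant, which
         * tolerates deleting the entry currently being visited */
        struct kretprobe_instance *ri;
        struct hlist_node *pos, *next;

        hlist_for_each_entry_safe(ri, pos, next, &rp->free_instances, uflist) {
                hlist_del(&ri->uflist);
                kfree(ri);
        }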
Signed-off-by: Christoph Hellwig <hch@lst.de>
Cc: Prasanna S Panchamukhi <prasanna@in.ibm.com>
Acked-by: Ananth N Mavinakayanahalli <ananth@in.ibm.com>
Cc: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/kprobes.c | 64
1 file changed, 21 insertions(+), 43 deletions(-)
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index 22857003a65b..f58f171bd65f 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -358,46 +358,6 @@ void __kprobes kprobes_inc_nmissed_count(struct kprobe *p)
 }
 
 /* Called with kretprobe_lock held */
-struct kretprobe_instance __kprobes *get_free_rp_inst(struct kretprobe *rp)
-{
-        struct hlist_node *node;
-        struct kretprobe_instance *ri;
-        hlist_for_each_entry(ri, node, &rp->free_instances, uflist)
-                return ri;
-        return NULL;
-}
-
-/* Called with kretprobe_lock held */
-static struct kretprobe_instance __kprobes *get_used_rp_inst(struct kretprobe
-                                                                *rp)
-{
-        struct hlist_node *node;
-        struct kretprobe_instance *ri;
-        hlist_for_each_entry(ri, node, &rp->used_instances, uflist)
-                return ri;
-        return NULL;
-}
-
-/* Called with kretprobe_lock held */
-void __kprobes add_rp_inst(struct kretprobe_instance *ri)
-{
-        /*
-         * Remove rp inst off the free list -
-         * Add it back when probed function returns
-         */
-        hlist_del(&ri->uflist);
-
-        /* Add rp inst onto table */
-        INIT_HLIST_NODE(&ri->hlist);
-        hlist_add_head(&ri->hlist,
-                &kretprobe_inst_table[hash_ptr(ri->task, KPROBE_HASH_BITS)]);
-
-        /* Also add this rp inst to the used list. */
-        INIT_HLIST_NODE(&ri->uflist);
-        hlist_add_head(&ri->uflist, &ri->rp->used_instances);
-}
-
-/* Called with kretprobe_lock held */
 void __kprobes recycle_rp_inst(struct kretprobe_instance *ri,
                                 struct hlist_head *head)
 {
@@ -450,7 +410,9 @@ void __kprobes kprobe_flush_task(struct task_struct *tk)
 static inline void free_rp_inst(struct kretprobe *rp)
 {
         struct kretprobe_instance *ri;
-        while ((ri = get_free_rp_inst(rp)) != NULL) {
+        struct hlist_node *pos, *next;
+
+        hlist_for_each_entry_safe(ri, pos, next, &rp->free_instances, uflist) {
                 hlist_del(&ri->uflist);
                 kfree(ri);
         }
@@ -732,7 +694,21 @@ static int __kprobes pre_handler_kretprobe(struct kprobe *p,
 
         /*TODO: consider to only swap the RA after the last pre_handler fired */
         spin_lock_irqsave(&kretprobe_lock, flags);
-        arch_prepare_kretprobe(rp, regs);
+        if (!hlist_empty(&rp->free_instances)) {
+                struct kretprobe_instance *ri;
+
+                ri = hlist_entry(rp->free_instances.first,
+                                 struct kretprobe_instance, uflist);
+                ri->rp = rp;
+                ri->task = current;
+                arch_prepare_kretprobe(ri, regs);
+
+                /* XXX(hch): why is there no hlist_move_head? */
+                hlist_del(&ri->uflist);
+                hlist_add_head(&ri->uflist, &ri->rp->used_instances);
+                hlist_add_head(&ri->hlist, kretprobe_inst_table_head(ri->task));
+        } else
+                rp->nmissed++;
         spin_unlock_irqrestore(&kretprobe_lock, flags);
         return 0;
 }
@@ -795,11 +771,13 @@ void __kprobes unregister_kretprobe(struct kretprobe *rp)
 {
         unsigned long flags;
         struct kretprobe_instance *ri;
+        struct hlist_node *pos, *next;
 
         unregister_kprobe(&rp->kp);
+
         /* No race here */
         spin_lock_irqsave(&kretprobe_lock, flags);
-        while ((ri = get_used_rp_inst(rp)) != NULL) {
+        hlist_for_each_entry_safe(ri, pos, next, &rp->used_instances, uflist) {
                 ri->rp = NULL;
                 hlist_del(&ri->uflist);
         }
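
For context on the first changelog bullet: with instance allocation and list
handling now done once in pre_handler_kretprobe() above, each architecture's
arch_prepare_kretprobe() is reduced to the return-address swap. A rough sketch
of what such an implementation looks like after this change, loosely modelled
on the i386 variant of this era (the register name and exact body are an
assumption for illustration, not part of this patch):

        void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
                                              struct pt_regs *regs)
        {
                unsigned long *sara = (unsigned long *)&regs->esp;

                /* remember the real return address for the trampoline handler */
                ri->ret_addr = (kprobe_opcode_t *) *sara;

                /* divert the function's return into kretprobe_trampoline */
                *sara = (unsigned long) &kretprobe_trampoline;
        }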