author		bibo, mao <bibo.mao@intel.com>	2006-10-02 05:17:35 -0400
committer	Linus Torvalds <torvalds@g5.osdl.org>	2006-10-02 10:57:16 -0400
commit		99219a3fbc2dcf2eaa954f7b2ac27299fd7894cd
tree		895abde156c9fbeea9c5a87cfaaa411d4ad175c6 /kernel/kprobes.c
parent		f2aa85a0ccd90110e76c6375535adc3ae358f971
[PATCH] kretprobe spinlock deadlock patch
kprobe_flush_task() can call kfree() while holding the kretprobe_lock
spinlock. If kfree() is itself probed by a kretprobe, handling that probe
takes kretprobe_lock again and deadlocks on the spinlock. This patch moves
the kfree() calls out of the scope of kretprobe_lock: recycle_rp_inst()
now parks instances that are being unregistered on a caller-supplied hlist,
and kprobe_flush_task() frees them after dropping the lock.
Signed-off-by: bibo, mao <bibo.mao@intel.com>
Signed-off-by: Ananth N Mavinakayanahalli <ananth@in.ibm.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
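The fix applies a common locking pattern: while the spinlock is held, entries that need freeing are only unlinked onto a private list, and the actual kfree() runs after the lock is dropped, so instrumentation of the free path can safely take the same lock. The user-space sketch below illustrates that pattern only; `struct node`, `table_lock` and `flush_owner()` are invented for the example and are not part of the kernel code touched by this patch.

```c
/* Illustrative sketch of the "defer the free until the lock is dropped"
 * pattern; not kernel code. */
#include <pthread.h>
#include <stdlib.h>

struct node {
	struct node *next;
	int owner;
};

static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;
static struct node *table;	/* all live nodes, protected by table_lock */

/*
 * Remove every node owned by 'owner'.  Nodes are unlinked onto a private
 * 'reap' list under the lock; free() runs only after table_lock has been
 * released, so a hook on the free path that needs table_lock cannot
 * deadlock against us.
 */
static void flush_owner(int owner)
{
	struct node **pp, *n, *reap = NULL;

	pthread_mutex_lock(&table_lock);
	for (pp = &table; (n = *pp) != NULL; ) {
		if (n->owner == owner) {
			*pp = n->next;		/* unlink under the lock */
			n->next = reap;		/* park on the private list */
			reap = n;
		} else {
			pp = &n->next;
		}
	}
	pthread_mutex_unlock(&table_lock);

	while (reap != NULL) {			/* free outside the lock */
		n = reap;
		reap = reap->next;
		free(n);
	}
}
```

In the patch the private list is the on-stack `empty_rp` hlist in kprobe_flush_task(), and recycle_rp_inst() plays the role of the unlink step.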
Diffstat (limited to 'kernel/kprobes.c')
-rw-r--r--	kernel/kprobes.c	15
1 file changed, 11 insertions(+), 4 deletions(-)
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index 41dfda50e22a..610c837ad9e0 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -319,7 +319,8 @@ void __kprobes add_rp_inst(struct kretprobe_instance *ri)
 }
 
 /* Called with kretprobe_lock held */
-void __kprobes recycle_rp_inst(struct kretprobe_instance *ri)
+void __kprobes recycle_rp_inst(struct kretprobe_instance *ri,
+				struct hlist_head *head)
 {
 	/* remove rp inst off the rprobe_inst_table */
 	hlist_del(&ri->hlist);
@@ -331,7 +332,7 @@ void __kprobes recycle_rp_inst(struct kretprobe_instance *ri)
 		hlist_add_head(&ri->uflist, &ri->rp->free_instances);
 	} else
 		/* Unregistering */
-		kfree(ri);
+		hlist_add_head(&ri->hlist, head);
 }
 
 struct hlist_head __kprobes *kretprobe_inst_table_head(struct task_struct *tsk)
@@ -348,17 +349,23 @@ struct hlist_head __kprobes *kretprobe_inst_table_head(struct task_struct *tsk)
 void __kprobes kprobe_flush_task(struct task_struct *tk)
 {
 	struct kretprobe_instance *ri;
-	struct hlist_head *head;
+	struct hlist_head *head, empty_rp;
 	struct hlist_node *node, *tmp;
 	unsigned long flags = 0;
 
+	INIT_HLIST_HEAD(&empty_rp);
 	spin_lock_irqsave(&kretprobe_lock, flags);
 	head = kretprobe_inst_table_head(tk);
 	hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
 		if (ri->task == tk)
-			recycle_rp_inst(ri);
+			recycle_rp_inst(ri, &empty_rp);
 	}
 	spin_unlock_irqrestore(&kretprobe_lock, flags);
+
+	hlist_for_each_entry_safe(ri, node, tmp, &empty_rp, hlist) {
+		hlist_del(&ri->hlist);
+		kfree(ri);
+	}
 }
 
 static inline void free_rp_inst(struct kretprobe *rp)