path: root/kernel/kprobes.c
author     Tejun Heo <tj@kernel.org>   2012-12-21 20:57:00 -0500
committer  Tejun Heo <tj@kernel.org>   2013-02-09 14:32:42 -0500
commit     ad72b3bea744b4db01c89af0f86f3e8920d354df (patch)
tree       8482593d6330783150e88ff7181a1484d3156265 /kernel/kprobes.c
parent     7c99e0bf86fdc1dee238eb6e213b980f887b68f1 (diff)
kprobes: fix wait_for_kprobe_optimizer()
wait_for_kprobe_optimizer() seems largely broken.  It uses
optimizer_comp which is never re-initialized, so
wait_for_kprobe_optimizer() will never wait for anything once
kprobe_optimizer() finishes all pending jobs for the first time.
Also, aside from completion, delayed_work_pending() is %false once
kprobe_optimizer() starts execution and wait_for_kprobe_optimizer()
won't wait for it.

Reimplement it so that it flushes optimizing_work until
[un]optimizing_lists are empty.  Note that this also makes
optimizing_work execute immediately if someone's waiting for it,
which is the nicer behavior.

Only compile tested.

Signed-off-by: Tejun Heo <tj@kernel.org>
Acked-by: Masami Hiramatsu <masami.hiramatsu.pt@hitachi.com>
Cc: Ananth N Mavinakayanahalli <ananth@in.ibm.com>
Cc: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
Cc: "David S. Miller" <davem@davemloft.net>
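[Editor's note: the root cause is that a completion latches.  complete_all()
marks the completion so that all current and future waiters proceed, and since
optimizer_comp is a file-scope DECLARE_COMPLETION() that is never
re-initialized, every wait_for_completion() after the first complete_all()
returns immediately even while new [un]optimization jobs sit on the lists.
Below is a minimal userspace sketch of that latch behavior, using pthreads in
place of the kernel's completion machinery; the fake_* names are illustrative,
not kernel API.]

	#include <pthread.h>
	#include <stdbool.h>
	#include <stdio.h>

	static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
	static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
	static bool done;		/* stands in for the completion's counter */

	/* analogue of complete_all(): latches "done" and never resets it */
	static void fake_complete_all(void)
	{
		pthread_mutex_lock(&lock);
		done = true;
		pthread_cond_broadcast(&cond);
		pthread_mutex_unlock(&lock);
	}

	/* analogue of wait_for_completion(): blocks only until "done" is set */
	static void fake_wait_for_completion(void)
	{
		pthread_mutex_lock(&lock);
		while (!done)
			pthread_cond_wait(&cond, &lock);
		pthread_mutex_unlock(&lock);
	}

	int main(void)
	{
		fake_complete_all();		/* first optimizer pass finishes */
		fake_wait_for_completion();	/* correct: nothing left to wait for */

		/* new jobs get queued here, but "done" is still latched ... */
		fake_wait_for_completion();	/* returns immediately: the bug */
		puts("second wait returned without waiting");
		return 0;
	}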
Diffstat (limited to 'kernel/kprobes.c')
-rw-r--r--  kernel/kprobes.c | 23 +++++++++++++++--------
1 file changed, 15 insertions(+), 8 deletions(-)
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index 098f396aa409..f230e81a9db6 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -471,7 +471,6 @@ static LIST_HEAD(unoptimizing_list);
 
 static void kprobe_optimizer(struct work_struct *work);
 static DECLARE_DELAYED_WORK(optimizing_work, kprobe_optimizer);
-static DECLARE_COMPLETION(optimizer_comp);
 #define OPTIMIZE_DELAY 5
 
 /*
@@ -552,8 +551,7 @@ static __kprobes void do_free_cleaned_kprobes(struct list_head *free_list)
 /* Start optimizer after OPTIMIZE_DELAY passed */
 static __kprobes void kick_kprobe_optimizer(void)
 {
-	if (!delayed_work_pending(&optimizing_work))
-		schedule_delayed_work(&optimizing_work, OPTIMIZE_DELAY);
+	schedule_delayed_work(&optimizing_work, OPTIMIZE_DELAY);
 }
 
 /* Kprobe jump optimizer */
@@ -592,16 +590,25 @@ static __kprobes void kprobe_optimizer(struct work_struct *work)
 	/* Step 5: Kick optimizer again if needed */
 	if (!list_empty(&optimizing_list) || !list_empty(&unoptimizing_list))
 		kick_kprobe_optimizer();
-	else
-		/* Wake up all waiters */
-		complete_all(&optimizer_comp);
 }
 
 /* Wait for completing optimization and unoptimization */
 static __kprobes void wait_for_kprobe_optimizer(void)
 {
-	if (delayed_work_pending(&optimizing_work))
-		wait_for_completion(&optimizer_comp);
+	mutex_lock(&kprobe_mutex);
+
+	while (!list_empty(&optimizing_list) || !list_empty(&unoptimizing_list)) {
+		mutex_unlock(&kprobe_mutex);
+
+		/* this will also make optimizing_work execute immediately */
+		flush_delayed_work(&optimizing_work);
+		/* @optimizing_work might not have been queued yet, relax */
+		cpu_relax();
+
+		mutex_lock(&kprobe_mutex);
+	}
+
+	mutex_unlock(&kprobe_mutex);
 }
 
 /* Optimize kprobe if p is ready to be optimized */
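[Editor's note: for reference, the reworked waiter from the patch above is
reproduced here with the locking rationale spelled out in added comments.
flush_delayed_work() pulls a still-pending delayed work item forward so it
runs immediately, then waits for it to finish, which is why the completion
can be dropped entirely.]

	/* Wait for completing optimization and unoptimization */
	static __kprobes void wait_for_kprobe_optimizer(void)
	{
		mutex_lock(&kprobe_mutex);

		/*
		 * Recheck under kprobe_mutex: kprobe_optimizer() takes the
		 * same mutex, so seeing both lists empty here means all
		 * previously queued jobs have really been processed.
		 */
		while (!list_empty(&optimizing_list) || !list_empty(&unoptimizing_list)) {
			/* drop the mutex so kprobe_optimizer() can make progress */
			mutex_unlock(&kprobe_mutex);

			/* this will also make optimizing_work execute immediately */
			flush_delayed_work(&optimizing_work);
			/* @optimizing_work might not have been queued yet, relax */
			cpu_relax();

			mutex_lock(&kprobe_mutex);
		}

		mutex_unlock(&kprobe_mutex);
	}

[This also explains the kick_kprobe_optimizer() change: the
delayed_work_pending() guard was redundant, since schedule_delayed_work()
is already a no-op while the work is pending, and dropping the racy check
lets waiters and kickers interact safely.]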