diff options
-rw-r--r-- | kernel/kprobes.c | 23 |
1 file changed, 15 insertions(+), 8 deletions(-)
diff --git a/kernel/kprobes.c b/kernel/kprobes.c index 098f396aa409..f230e81a9db6 100644 --- a/kernel/kprobes.c +++ b/kernel/kprobes.c | |||
@@ -471,7 +471,6 @@ static LIST_HEAD(unoptimizing_list); | |||
471 | 471 | ||
472 | static void kprobe_optimizer(struct work_struct *work); | 472 | static void kprobe_optimizer(struct work_struct *work); |
473 | static DECLARE_DELAYED_WORK(optimizing_work, kprobe_optimizer); | 473 | static DECLARE_DELAYED_WORK(optimizing_work, kprobe_optimizer); |
474 | static DECLARE_COMPLETION(optimizer_comp); | ||
475 | #define OPTIMIZE_DELAY 5 | 474 | #define OPTIMIZE_DELAY 5 |
476 | 475 | ||
477 | /* | 476 | /* |
@@ -552,8 +551,7 @@ static __kprobes void do_free_cleaned_kprobes(struct list_head *free_list) | |||
552 | /* Start optimizer after OPTIMIZE_DELAY passed */ | 551 | /* Start optimizer after OPTIMIZE_DELAY passed */ |
553 | static __kprobes void kick_kprobe_optimizer(void) | 552 | static __kprobes void kick_kprobe_optimizer(void) |
554 | { | 553 | { |
555 | if (!delayed_work_pending(&optimizing_work)) | 554 | schedule_delayed_work(&optimizing_work, OPTIMIZE_DELAY); |
556 | schedule_delayed_work(&optimizing_work, OPTIMIZE_DELAY); | ||
557 | } | 555 | } |
558 | 556 | ||
559 | /* Kprobe jump optimizer */ | 557 | /* Kprobe jump optimizer */ |
@@ -592,16 +590,25 @@ static __kprobes void kprobe_optimizer(struct work_struct *work) | |||
592 | /* Step 5: Kick optimizer again if needed */ | 590 | /* Step 5: Kick optimizer again if needed */ |
593 | if (!list_empty(&optimizing_list) || !list_empty(&unoptimizing_list)) | 591 | if (!list_empty(&optimizing_list) || !list_empty(&unoptimizing_list)) |
594 | kick_kprobe_optimizer(); | 592 | kick_kprobe_optimizer(); |
595 | else | ||
596 | /* Wake up all waiters */ | ||
597 | complete_all(&optimizer_comp); | ||
598 | } | 593 | } |
599 | 594 | ||
600 | /* Wait for completing optimization and unoptimization */ | 595 | /* Wait for completing optimization and unoptimization */ |
601 | static __kprobes void wait_for_kprobe_optimizer(void) | 596 | static __kprobes void wait_for_kprobe_optimizer(void) |
602 | { | 597 | { |
603 | if (delayed_work_pending(&optimizing_work)) | 598 | mutex_lock(&kprobe_mutex); |
604 | wait_for_completion(&optimizer_comp); | 599 | |
600 | while (!list_empty(&optimizing_list) || !list_empty(&unoptimizing_list)) { | ||
601 | mutex_unlock(&kprobe_mutex); | ||
602 | |||
603 | /* this will also make optimizing_work execute immmediately */ | ||
604 | flush_delayed_work(&optimizing_work); | ||
605 | /* @optimizing_work might not have been queued yet, relax */ | ||
606 | cpu_relax(); | ||
607 | |||
608 | mutex_lock(&kprobe_mutex); | ||
609 | } | ||
610 | |||
611 | mutex_unlock(&kprobe_mutex); | ||
605 | } | 612 | } |
606 | 613 | ||
607 | /* Optimize kprobe if p is ready to be optimized */ | 614 | /* Optimize kprobe if p is ready to be optimized */ |