author		Masami Hiramatsu <masami.hiramatsu.pt@hitachi.com>	2010-12-03 04:54:34 -0500
committer	Ingo Molnar <mingo@elte.hu>				2010-12-06 11:59:32 -0500
commit		f984ba4eb575e4a27ed28a76d4126d2aa9233c32 (patch)
tree		0df5f1510537edac10ae9cc1e5572d70dcf0a8c7 /kernel/kprobes.c
parent		cd7ebe2298ff1c3112232878678ce5fe6be8a15b (diff)
kprobes: Use text_poke_smp_batch for unoptimizing
Use text_poke_smp_batch() on the unoptimization path to reduce
the number of stop_machine() invocations. If the number of
probes to be unoptimized exceeds MAX_OPTIMIZE_PROBES (=256),
kprobes unoptimizes the first MAX_OPTIMIZE_PROBES probes and
kicks the optimizer again for the remaining probes.
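The batching rule above can be modeled in isolation. The following stand-alone,
user-space C sketch (not kernel code; unoptimize_pass() is a hypothetical helper)
shows how a backlog larger than MAX_OPTIMIZE_PROBES is drained over repeated
optimizer passes, with one stop_machine()-equivalent batch per pass:

/*
 * Stand-alone model of the batching rule from the commit message:
 * at most MAX_OPTIMIZE_PROBES probes are handled per pass, and the
 * optimizer is "kicked" again for whatever is left over.
 */
#include <stdio.h>

#define MAX_OPTIMIZE_PROBES 256

/* Model one optimizer pass over 'pending' probes; returns how many remain. */
static int unoptimize_pass(int pending)
{
        int batch = pending > MAX_OPTIMIZE_PROBES ? MAX_OPTIMIZE_PROBES : pending;

        /* One batch corresponds to one stop_machine()/text_poke_smp_batch(). */
        printf("batched %d probes in a single stop_machine()\n", batch);
        return pending - batch;
}

int main(void)
{
        int pending = 600;      /* e.g. 600 probes queued for unoptimization */
        int passes = 0;

        while (pending > 0) {
                pending = unoptimize_pass(pending);
                passes++;       /* kprobe_optimizer() re-kicks itself if work remains */
        }
        printf("total passes: %d\n", passes);   /* 3 passes for 600 probes */
        return 0;
}

Capping each pass bounds the disruption caused by any single stop_machine()
call, while the self-kick in step 5 guarantees the backlog is eventually drained.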
Signed-off-by: Masami Hiramatsu <masami.hiramatsu.pt@hitachi.com>
Cc: Rusty Russell <rusty@rustcorp.com.au>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Ananth N Mavinakayanahalli <ananth@in.ibm.com>
Cc: Jason Baron <jbaron@redhat.com>
Cc: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
Cc: 2nddept-manager@sdl.hitachi.co.jp
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Steven Rostedt <rostedt@goodmis.org>
LKML-Reference: <20101203095434.2961.22657.stgit@ltc236.sdl.hitachi.co.jp>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel/kprobes.c')
-rw-r--r--	kernel/kprobes.c	10
1 files changed, 4 insertions, 6 deletions
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index 531e1016483..7663e5df0e6 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -517,9 +517,9 @@ static __kprobes void do_unoptimize_kprobes(struct list_head *free_list)
         /* Ditto to do_optimize_kprobes */
         get_online_cpus();
         mutex_lock(&text_mutex);
-        list_for_each_entry_safe(op, tmp, &unoptimizing_list, list) {
-                /* Unoptimize kprobes */
-                arch_unoptimize_kprobe(op);
+        arch_unoptimize_kprobes(&unoptimizing_list, free_list);
+        /* Loop free_list for disarming */
+        list_for_each_entry_safe(op, tmp, free_list, list) {
                 /* Disarm probes if marked disabled */
                 if (kprobe_disabled(&op->kp))
                         arch_disarm_kprobe(&op->kp);
@@ -530,8 +530,6 @@ static __kprobes void do_unoptimize_kprobes(struct list_head *free_list)
                          * (reclaiming is done by do_free_cleaned_kprobes.)
                          */
                         hlist_del_rcu(&op->kp.hlist);
-                        /* Move only unused probes on free_list */
-                        list_move(&op->list, free_list);
                 } else
                         list_del_init(&op->list);
         }
@@ -592,7 +590,7 @@ static __kprobes void kprobe_optimizer(struct work_struct *work)
         mutex_unlock(&module_mutex);
 
         /* Step 5: Kick optimizer again if needed */
-        if (!list_empty(&optimizing_list))
+        if (!list_empty(&optimizing_list) || !list_empty(&unoptimizing_list))
                 kick_kprobe_optimizer();
         else
                 /* Wake up all waiters */
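The generic code above only moves probes between lists; the actual batching
happens in the arch helper it now calls. Below is a rough sketch of what an
arch_unoptimize_kprobes() built on the x86 text_poke_smp_batch() interface
might look like; MAX_UNOPTIMIZE_PROBES, unopt_params and fill_unoptimize_param()
are illustrative names, not the real arch/x86/kernel/kprobes.c identifiers:

/*
 * Illustrative sketch only -- not the actual arch/x86/kernel/kprobes.c code.
 * It shows the shape of an arch_unoptimize_kprobes() that turns per-probe
 * text patching into a single text_poke_smp_batch() call, i.e. one
 * stop_machine() for the whole batch.  MAX_UNOPTIMIZE_PROBES, unopt_params
 * and fill_unoptimize_param() are made-up names used for illustration.
 */
#include <linux/kprobes.h>
#include <linux/list.h>
#include <asm/alternative.h>    /* text_poke_smp_batch(), struct text_poke_param */

#define MAX_UNOPTIMIZE_PROBES   256

static struct text_poke_param unopt_params[MAX_UNOPTIMIZE_PROBES];

/* Hypothetical helper: record the address and original bytes to restore. */
static void fill_unoptimize_param(struct text_poke_param *p,
                                  struct optimized_kprobe *op);

void arch_unoptimize_kprobes(struct list_head *oplist,
                             struct list_head *done_list)
{
        struct optimized_kprobe *op, *tmp;
        int c = 0;

        list_for_each_entry_safe(op, tmp, oplist, list) {
                /* Collect the patch parameters for this probe. */
                fill_unoptimize_param(&unopt_params[c], op);
                /* Hand the probe back to the generic code via done_list. */
                list_move(&op->list, done_list);
                if (++c >= MAX_UNOPTIMIZE_PROBES)
                        break;
        }
        /* Patch every collected site under a single stop_machine(). */
        text_poke_smp_batch(unopt_params, c);
}

Whatever the exact arch implementation, the effect visible in the generic diff
above is that do_unoptimize_kprobes() now issues one batched patch operation
per pass instead of one per probe, as the old arch_unoptimize_kprobe() loop did.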