author    Masami Hiramatsu <masami.hiramatsu.pt@hitachi.com>  2010-12-03 04:54:28 -0500
committer Ingo Molnar <mingo@elte.hu>  2010-12-06 11:59:31 -0500
commit    cd7ebe2298ff1c3112232878678ce5fe6be8a15b
tree      7bac7adf40ce2141e779b7d99b2784279c2dc45c /kernel/kprobes.c
parent    7deb18dcf0478940ac979de002db1ed8ba6531dc
kprobes: Use text_poke_smp_batch for optimizing
Use text_poke_smp_batch() in the optimization path to reduce the number
of stop_machine() calls. If there are more probes to optimize than
MAX_OPTIMIZE_PROBES (=256), kprobes optimizes the first
MAX_OPTIMIZE_PROBES probes and kicks the optimizer again for the
remaining ones.

Changes in v5:
- Use kick_kprobe_optimizer() instead of calling
  schedule_delayed_work() directly.
- Reschedule the optimizer outside of the kprobe mutex lock.

Changes in v2:
- Allocate the code buffer and parameters in arch_init_kprobes()
  instead of using static arrays.
- Merge the previous max-optimization-limit patch into this one, so
  this patch introduces the upper limit on how many probes are
  optimized at once.

Signed-off-by: Masami Hiramatsu <mhiramat@redhat.com>
Cc: Rusty Russell <rusty@rustcorp.com.au>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Ananth N Mavinakayanahalli <ananth@in.ibm.com>
Cc: Jason Baron <jbaron@redhat.com>
Cc: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
Cc: 2nddept-manager@sdl.hitachi.co.jp
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Steven Rostedt <rostedt@goodmis.org>
LKML-Reference: <20101203095428.2961.8994.stgit@ltc236.sdl.hitachi.co.jp>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
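The per-probe loop removed below moves into the architecture code, where
the patching requests can be batched. For context, here is a sketch of
the x86 half of this series; that half is outside the scope of this
diff, so names such as setup_optimize_kprobe(), jump_poke_params and
jump_poke_bufs should be read as illustrative. The idea is that
arch_optimize_kprobes() collects up to MAX_OPTIMIZE_PROBES relative-jump
patches and applies them with one text_poke_smp_batch() call, i.e. one
stop_machine() instead of one per probe:

/*
 * Illustrative sketch of the arch/x86 side of this series (not part
 * of the kernel/kprobes.c diff shown here).
 */
#define MAX_OPTIMIZE_PROBES 256

/* Both buffers are allocated in arch_init_kprobes() (changed in v2) */
static struct text_poke_param *jump_poke_params;
static struct jump_poke_buffer {
	u8 buf[RELATIVEJUMP_SIZE];
} *jump_poke_bufs;

static void __kprobes setup_optimize_kprobe(struct text_poke_param *tprm,
					    u8 *insn_buf,
					    struct optimized_kprobe *op)
{
	s32 rel = (s32)((long)op->optinsn.insn -
			((long)op->kp.addr + RELATIVEJUMP_SIZE));

	/* Back up the instructions that the jump will overwrite */
	memcpy(op->optinsn.copied_insn, op->kp.addr + INT3_SIZE,
	       RELATIVE_ADDR_SIZE);

	/* Build the relative jump to the out-of-line buffer */
	insn_buf[0] = RELATIVEJUMP_OPCODE;
	*(s32 *)(&insn_buf[1]) = rel;

	tprm->addr = op->kp.addr;
	tprm->opcode = insn_buf;
	tprm->len = RELATIVEJUMP_SIZE;
}

void __kprobes arch_optimize_kprobes(struct list_head *oplist)
{
	struct optimized_kprobe *op, *tmp;
	int c = 0;

	list_for_each_entry_safe(op, tmp, oplist, list) {
		WARN_ON(kprobe_disabled(&op->kp));
		/* Queue a patching request for this probe */
		setup_optimize_kprobe(&jump_poke_params[c],
				      jump_poke_bufs[c].buf, op);
		list_del_init(&op->list);
		/* Cap the batch; the optimizer re-kicks for the rest */
		if (++c >= MAX_OPTIMIZE_PROBES)
			break;
	}

	/* Patch all collected sites under a single stop_machine() */
	text_poke_smp_batch(jump_poke_params, c);
}

Probes still left on optimizing_list after the first 256 are picked up
again by the "Step 5" re-kick in the last hunk of the diff below.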
Diffstat (limited to 'kernel/kprobes.c')
-rw-r--r--  kernel/kprobes.c  17
1 file changed, 7 insertions(+), 10 deletions(-)
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index 134754d18bb4..531e10164836 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -480,8 +480,6 @@ static DECLARE_COMPLETION(optimizer_comp);
  */
 static __kprobes void do_optimize_kprobes(void)
 {
-	struct optimized_kprobe *op, *tmp;
-
 	/* Optimization never be done when disarmed */
 	if (kprobes_all_disarmed || !kprobes_allow_optimization ||
 	    list_empty(&optimizing_list))
@@ -499,12 +497,7 @@ static __kprobes void do_optimize_kprobes(void)
 	 */
 	get_online_cpus();
 	mutex_lock(&text_mutex);
-	list_for_each_entry_safe(op, tmp, &optimizing_list, list) {
-		WARN_ON(kprobe_disabled(&op->kp));
-		if (arch_optimize_kprobe(op) < 0)
-			op->kp.flags &= ~KPROBE_FLAG_OPTIMIZED;
-		list_del_init(&op->list);
-	}
+	arch_optimize_kprobes(&optimizing_list);
 	mutex_unlock(&text_mutex);
 	put_online_cpus();
 }
@@ -598,8 +591,12 @@ static __kprobes void kprobe_optimizer(struct work_struct *work)
 	mutex_unlock(&kprobe_mutex);
 	mutex_unlock(&module_mutex);
 
-	/* Wake up all waiters */
-	complete_all(&optimizer_comp);
+	/* Step 5: Kick optimizer again if needed */
+	if (!list_empty(&optimizing_list))
+		kick_kprobe_optimizer();
+	else
+		/* Wake up all waiters */
+		complete_all(&optimizer_comp);
 }
 
 /* Wait for completing optimization and unoptimization */
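The "Step 5" re-kick relies on kick_kprobe_optimizer(), added by the
parent commit 7deb18dc ("kprobes: Separate kprobe optimizing code from
optimizer"). A minimal sketch of its shape, assuming the delayed-work
setup from that commit (OPTIMIZE_DELAY and optimizing_work are taken
from that series and are illustrative here):

/* Sketch of the helper from the parent commit; details illustrative */
#define OPTIMIZE_DELAY 5	/* jiffies to wait before (re)optimizing */

static void kprobe_optimizer(struct work_struct *work);
static DECLARE_DELAYED_WORK(optimizing_work, kprobe_optimizer);

static __kprobes void kick_kprobe_optimizer(void)
{
	/* Schedule an optimizer pass unless one is already pending */
	if (!delayed_work_pending(&optimizing_work))
		schedule_delayed_work(&optimizing_work, OPTIMIZE_DELAY);
}

Because the re-kick happens after kprobe_mutex is dropped, a large burst
of probe registrations is drained MAX_OPTIMIZE_PROBES at a time without
holding the lock across successive stop_machine() batches.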