aboutsummaryrefslogtreecommitdiffstats
path: root/kernel/kprobes.c
diff options
context:
space:
mode:
authorMasami Hiramatsu <masami.hiramatsu.pt@hitachi.com>2010-12-03 04:54:03 -0500
committerIngo Molnar <mingo@elte.hu>2010-12-06 11:59:30 -0500
commit61f4e13ffd85c037a942c5b7fd13f2b0c3162862 (patch)
tree29ce3816a1fbbed40005df664652e040d88117db /kernel/kprobes.c
parent6f0f1dd71953d4243c11e490dd49ef24ebaf6c0b (diff)
kprobes: Separate kprobe optimizing code from optimizer
Separate kprobe optimizing code from optimizer; this will make it easy to introduce unoptimizing code in the optimizer. Signed-off-by: Masami Hiramatsu <masami.hiramatsu.pt@hitachi.com> Cc: Rusty Russell <rusty@rustcorp.com.au> Cc: Frederic Weisbecker <fweisbec@gmail.com> Cc: Ananth N Mavinakayanahalli <ananth@in.ibm.com> Cc: Jason Baron <jbaron@redhat.com> Cc: Mathieu Desnoyers <mathieu.desnoyers@efficios.com> Cc: 2nddept-manager@sdl.hitachi.co.jp LKML-Reference: <20101203095403.2961.91201.stgit@ltc236.sdl.hitachi.co.jp> Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel/kprobes.c')
-rw-r--r--kernel/kprobes.c43
1 files changed, 26 insertions, 17 deletions
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index ab99caf2b167..1f4f9b9d5c89 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -427,26 +427,14 @@ static void kprobe_optimizer(struct work_struct *work);
427static DECLARE_DELAYED_WORK(optimizing_work, kprobe_optimizer); 427static DECLARE_DELAYED_WORK(optimizing_work, kprobe_optimizer);
428#define OPTIMIZE_DELAY 5 428#define OPTIMIZE_DELAY 5
429 429
430/* Kprobe jump optimizer */ 430/*
431static __kprobes void kprobe_optimizer(struct work_struct *work) 431 * Optimize (replace a breakpoint with a jump) kprobes listed on
432 * optimizing_list.
433 */
434static __kprobes void do_optimize_kprobes(void)
432{ 435{
433 struct optimized_kprobe *op, *tmp; 436 struct optimized_kprobe *op, *tmp;
434 437
435 /* Lock modules while optimizing kprobes */
436 mutex_lock(&module_mutex);
437 mutex_lock(&kprobe_mutex);
438 if (kprobes_all_disarmed || !kprobes_allow_optimization)
439 goto end;
440
441 /*
442 * Wait for quiesence period to ensure all running interrupts
443 * are done. Because optprobe may modify multiple instructions
444 * there is a chance that Nth instruction is interrupted. In that
445 * case, running interrupt can return to 2nd-Nth byte of jump
446 * instruction. This wait is for avoiding it.
447 */
448 synchronize_sched();
449
450 /* 438 /*
451 * The optimization/unoptimization refers online_cpus via 439 * The optimization/unoptimization refers online_cpus via
452 * stop_machine() and cpu-hotplug modifies online_cpus. 440 * stop_machine() and cpu-hotplug modifies online_cpus.
@@ -467,6 +455,27 @@ static __kprobes void kprobe_optimizer(struct work_struct *work)
467 } 455 }
468 mutex_unlock(&text_mutex); 456 mutex_unlock(&text_mutex);
469 put_online_cpus(); 457 put_online_cpus();
458}
459
460/* Kprobe jump optimizer */
461static __kprobes void kprobe_optimizer(struct work_struct *work)
462{
463 /* Lock modules while optimizing kprobes */
464 mutex_lock(&module_mutex);
465 mutex_lock(&kprobe_mutex);
466 if (kprobes_all_disarmed || !kprobes_allow_optimization)
467 goto end;
468
469 /*
470 * Wait for quiesence period to ensure all running interrupts
471 * are done. Because optprobe may modify multiple instructions
472 * there is a chance that Nth instruction is interrupted. In that
473 * case, running interrupt can return to 2nd-Nth byte of jump
474 * instruction. This wait is for avoiding it.
475 */
476 synchronize_sched();
477
478 do_optimize_kprobes();
470end: 479end:
471 mutex_unlock(&kprobe_mutex); 480 mutex_unlock(&kprobe_mutex);
472 mutex_unlock(&module_mutex); 481 mutex_unlock(&module_mutex);