author     Masami Hiramatsu <mhiramat@redhat.com>    2010-02-25 08:34:15 -0500
committer  Ingo Molnar <mingo@elte.hu>               2010-02-25 11:49:25 -0500
commit     b2be84df99ebc93599c69e931a3c4a5105abfabc
tree       35f720b12bed1cc98c7d261dc3a6af96916faa44 /kernel/kprobes.c
parent     afd66255b9a48f5851326ddae50e2203fbf71dc9
kprobes: Jump optimization sysctl interface
Add a /proc/sys/debug/kprobes-optimization sysctl that enables and
disables kprobes jump optimization on the fly, for debugging.
Changes in v7:
- Remove ctl_name = CTL_UNNUMBERED for upstream compatibility.
Changes in v6:
- Update comments and coding style.
Signed-off-by: Masami Hiramatsu <mhiramat@redhat.com>
Cc: systemtap <systemtap@sources.redhat.com>
Cc: DLE <dle-develop@lists.sourceforge.net>
Cc: Ananth N Mavinakayanahalli <ananth@in.ibm.com>
Cc: Jim Keniston <jkenisto@us.ibm.com>
Cc: Srikar Dronamraju <srikar@linux.vnet.ibm.com>
Cc: Christoph Hellwig <hch@infradead.org>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Anders Kaseorg <andersk@ksplice.com>
Cc: Tim Abbott <tabbott@ksplice.com>
Cc: Andi Kleen <andi@firstfloor.org>
Cc: Jason Baron <jbaron@redhat.com>
Cc: Mathieu Desnoyers <compudj@krystal.dyndns.org>
LKML-Reference: <20100225133415.6725.8274.stgit@localhost6.localdomain6>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel/kprobes.c')
-rw-r--r--  kernel/kprobes.c | 88
1 file changed, 85 insertions(+), 3 deletions(-)
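
For orientation before the diff: the new knob is a plain integer file under
/proc/sys/debug. A minimal user-space sketch for flipping it (the path comes
from the commit message above; the program itself is illustrative and not
part of the patch):

/* toggle_kprobe_opt.c - illustrative sketch, not part of this patch.
 * Writes "1" (allow jump optimization) or "0" (prohibit it) to the
 * sysctl added below. Assumes CONFIG_OPTPROBES and CONFIG_SYSCTL. */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(int argc, char **argv)
{
	const char *val = (argc > 1 && argv[1][0] == '0') ? "0" : "1";
	int fd = open("/proc/sys/debug/kprobes-optimization", O_WRONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (write(fd, val, 1) != 1) {
		perror("write");
		close(fd);
		return 1;
	}
	close(fd);
	return 0;
}

Reading the file back (e.g. cat /proc/sys/debug/kprobes-optimization) reports
the live state, because the handler added below refreshes its sysctl variable
from kprobes_allow_optimization before proc_dointvec_minmax() formats the
output.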
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index 612af2d61614..fa034d29cf73 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -42,6 +42,7 @@
 #include <linux/freezer.h>
 #include <linux/seq_file.h>
 #include <linux/debugfs.h>
+#include <linux/sysctl.h>
 #include <linux/kdebug.h>
 #include <linux/memory.h>
 #include <linux/ftrace.h>
@@ -360,6 +361,9 @@ static inline void copy_kprobe(struct kprobe *old_p, struct kprobe *p)
 }
 
 #ifdef CONFIG_OPTPROBES
+/* NOTE: change this value only with kprobe_mutex held */
+static bool kprobes_allow_optimization;
+
 /*
  * Call all pre_handler on the list, but ignores its return value.
  * This must be called from arch-dep optimized caller.
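
The NOTE in this hunk is the entire synchronization contract for the new
flag: every writer must hold kprobe_mutex, which the sysctl handler added
later in this patch takes before calling the optimize/unoptimize helpers.
A hedged sketch of that invariant (this helper does not exist in the patch;
lockdep_assert_held() is simply one way to make the rule checkable):

/* Illustrative only - not in the patch. Encodes the NOTE above:
 * kprobes_allow_optimization may only change under kprobe_mutex,
 * as proc_kprobes_optimization_handler() below guarantees. */
static void set_kprobes_allow_optimization(bool allow)
{
	lockdep_assert_held(&kprobe_mutex);
	kprobes_allow_optimization = allow;
}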
@@ -428,7 +432,7 @@ static __kprobes void kprobe_optimizer(struct work_struct *work)
 	/* Lock modules while optimizing kprobes */
 	mutex_lock(&module_mutex);
 	mutex_lock(&kprobe_mutex);
-	if (kprobes_all_disarmed)
+	if (kprobes_all_disarmed || !kprobes_allow_optimization)
 		goto end;
 
 	/*
@@ -471,7 +475,7 @@ static __kprobes void optimize_kprobe(struct kprobe *p)
 	struct optimized_kprobe *op;
 
 	/* Check if the kprobe is disabled or not ready for optimization. */
-	if (!kprobe_optready(p) ||
+	if (!kprobe_optready(p) || !kprobes_allow_optimization ||
 	    (kprobe_disabled(p) || kprobes_all_disarmed))
 		return;
 
@@ -588,6 +592,80 @@ static __kprobes void try_to_optimize_kprobe(struct kprobe *p)
 		optimize_kprobe(ap);
 }
 
+#ifdef CONFIG_SYSCTL
+static void __kprobes optimize_all_kprobes(void)
+{
+	struct hlist_head *head;
+	struct hlist_node *node;
+	struct kprobe *p;
+	unsigned int i;
+
+	/* If optimization is already allowed, just return */
+	if (kprobes_allow_optimization)
+		return;
+
+	kprobes_allow_optimization = true;
+	mutex_lock(&text_mutex);
+	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
+		head = &kprobe_table[i];
+		hlist_for_each_entry_rcu(p, node, head, hlist)
+			if (!kprobe_disabled(p))
+				optimize_kprobe(p);
+	}
+	mutex_unlock(&text_mutex);
+	printk(KERN_INFO "Kprobes globally optimized\n");
+}
+
+static void __kprobes unoptimize_all_kprobes(void)
+{
+	struct hlist_head *head;
+	struct hlist_node *node;
+	struct kprobe *p;
+	unsigned int i;
+
+	/* If optimization is already prohibited, just return */
+	if (!kprobes_allow_optimization)
+		return;
+
+	kprobes_allow_optimization = false;
+	printk(KERN_INFO "Kprobes globally unoptimized\n");
+	get_online_cpus();	/* For avoiding text_mutex deadlock */
+	mutex_lock(&text_mutex);
+	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
+		head = &kprobe_table[i];
+		hlist_for_each_entry_rcu(p, node, head, hlist) {
+			if (!kprobe_disabled(p))
+				unoptimize_kprobe(p);
+		}
+	}
+
+	mutex_unlock(&text_mutex);
+	put_online_cpus();
+	/* Allow all currently running kprobes to complete */
+	synchronize_sched();
+}
+
+int sysctl_kprobes_optimization;
+int proc_kprobes_optimization_handler(struct ctl_table *table, int write,
+				      void __user *buffer, size_t *length,
+				      loff_t *ppos)
+{
+	int ret;
+
+	mutex_lock(&kprobe_mutex);
+	sysctl_kprobes_optimization = kprobes_allow_optimization ? 1 : 0;
+	ret = proc_dointvec_minmax(table, write, buffer, length, ppos);
+
+	if (sysctl_kprobes_optimization)
+		optimize_all_kprobes();
+	else
+		unoptimize_all_kprobes();
+	mutex_unlock(&kprobe_mutex);
+
+	return ret;
+}
+#endif /* CONFIG_SYSCTL */
+
 static void __kprobes __arm_kprobe(struct kprobe *p)
 {
 	struct kprobe *old_p;
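
Note that sysctl_kprobes_optimization and proc_kprobes_optimization_handler()
are deliberately non-static: the ctl_table entry that exposes them under
/proc/sys/debug lives in kernel/sysctl.c, outside this diffstat-limited view.
A sketch of what that registration plausibly looks like (field values inferred
from the handler above; the table name here is illustrative, and the absence
of a .ctl_name field matches the v7 changelog note dropping CTL_UNNUMBERED):

/* Illustrative sketch of the kernel/sysctl.c side, which this
 * diffstat-limited view omits. extra1/extra2 make
 * proc_dointvec_minmax() clamp written values to the range 0..1. */
#include <linux/sysctl.h>

static int zero;
static int one = 1;

static struct ctl_table debug_table_sketch[] = {
	{
		.procname	= "kprobes-optimization",
		.data		= &sysctl_kprobes_optimization,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_kprobes_optimization_handler,
		.extra1		= &zero,	/* minimum: 0 = prohibit */
		.extra2		= &one,		/* maximum: 1 = allow */
	},
	{ }
};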
@@ -1610,10 +1688,14 @@ static int __init init_kprobes(void)
 		}
 	}
 
-#if defined(CONFIG_OPTPROBES) && defined(__ARCH_WANT_KPROBES_INSN_SLOT)
+#if defined(CONFIG_OPTPROBES)
+#if defined(__ARCH_WANT_KPROBES_INSN_SLOT)
 	/* Init kprobe_optinsn_slots */
 	kprobe_optinsn_slots.insn_size = MAX_OPTINSN_SIZE;
 #endif
+	/* By default, kprobes can be optimized */
+	kprobes_allow_optimization = true;
+#endif
 
 	/* By default, kprobes are armed */
 	kprobes_all_disarmed = false;