author     Masami Hiramatsu <masami.hiramatsu.pt@hitachi.com>   2012-06-05 06:28:32 -0400
committer  Steven Rostedt <rostedt@goodmis.org>                 2012-07-31 10:29:58 -0400
commit     ae6aa16fdc163afe6b04b6c073ad4ddd4663c03b
tree       0a6055f56f80561c4a92df5ea50628644db73e7e  /include/linux/kprobes.h
parent     4dc936769e8a6382a4cc12375e8a4daa2b829fda
kprobes: introduce ftrace based optimization
Introduce function-tracer-based kprobes optimization.

With ftrace optimization, a kprobe placed on the mcount calling
address uses ftrace's mcount call instead of a breakpoint.
Furthermore, unlike the current jump-based optimization, this
optimization also works on a preemptive kernel. Of course, the
feature works only if the probe is on the mcount call.
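
To make "only if the probe is on the mcount call" concrete, here is a
sketch of the registration-time check; the helper name is hypothetical
(the real logic lives in kernel/kprobes.c, not in this header diff):

/*
 * Illustrative sketch, assuming ftrace_location() returns the recorded
 * mcount call address for the probed function (or 0 if the address is
 * not an mcount site). check_ftrace_probe() is a hypothetical name.
 */
static int check_ftrace_probe(struct kprobe *p)
{
	unsigned long ftrace_addr = ftrace_location((unsigned long)p->addr);

	if (ftrace_addr) {
#ifdef KPROBES_CAN_USE_FTRACE
		/* The probe must sit exactly on the mcount call. */
		if ((unsigned long)p->addr != ftrace_addr)
			return -EILSEQ;
		p->flags |= KPROBE_FLAG_FTRACE;
#else
		/* mcount sites are patched by dynamic ftrace; refuse. */
		return -EINVAL;
#endif
	}
	return 0;
}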
A probe with kprobe.break_handler set is not optimized with ftrace
(nor put on ftrace). The reason for this limitation is that
break_handler may be used only by jprobes, which change the ip
address (for fetching the function arguments), while the function
tracer ignores a modified ip address.
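
For context, this is the jprobe pattern that trips the limitation: the
handler mirrors the probed function's prototype and is entered by
rewriting regs->ip. An illustrative module, modeled on the in-tree
jprobe sample; the do_fork prototype is the 2012-era one and is assumed
here for illustration:

#include <linux/module.h>
#include <linux/kprobes.h>

/*
 * Illustrative jprobe: the handler must mirror the probed function's
 * prototype (assumed here), because jprobes redirect regs->ip into it
 * with the original arguments -- the redirection ftrace would ignore.
 */
static long jdo_fork(unsigned long clone_flags, unsigned long stack_start,
		     struct pt_regs *regs, unsigned long stack_size,
		     int __user *parent_tidptr, int __user *child_tidptr)
{
	pr_info("do_fork: clone_flags = 0x%lx\n", clone_flags);
	jprobe_return();	/* always end with jprobe_return() */
	return 0;
}

static struct jprobe my_jprobe = {
	.entry = jdo_fork,
	.kp = {
		.symbol_name = "do_fork",
	},
};

static int __init jprobe_init(void)
{
	return register_jprobe(&my_jprobe);
}

static void __exit jprobe_exit(void)
{
	unregister_jprobe(&my_jprobe);
}

module_init(jprobe_init);
module_exit(jprobe_exit);
MODULE_LICENSE("GPL");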
Changes in v2:
- Fix ftrace_ops registration so it happens right after setting its
  filter (see the sketch below).
- Unregister the ftrace_ops when no kprobe is using it.
- Remove the notrace dependency from the __kprobes macro.
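
A sketch of the arm/disarm pattern the first two items imply: all
ftrace-based kprobes share one ftrace_ops, filtered per address and
refcounted. The names kprobe_ftrace_ops, kprobe_ftrace_enabled, and the
two helpers are assumptions for illustration, not necessarily the exact
symbols in kernel/kprobes.c:

/*
 * Illustrative sketch: one shared ftrace_ops whose callback is the
 * kprobe handler, with SAVE_REGS so handlers get a full pt_regs.
 */
static struct ftrace_ops kprobe_ftrace_ops __read_mostly = {
	.func	= kprobe_ftrace_handler,
	.flags	= FTRACE_OPS_FL_SAVE_REGS,
};
static int kprobe_ftrace_enabled;

static void arm_kprobe_ftrace(struct kprobe *p)
{
	/* Set the address filter first, so the ops never fires elsewhere... */
	ftrace_set_filter_ip(&kprobe_ftrace_ops, (unsigned long)p->addr, 0, 0);
	/* ...then register the ops right after the filter is in place. */
	if (kprobe_ftrace_enabled++ == 0)
		register_ftrace_function(&kprobe_ftrace_ops);
}

static void disarm_kprobe_ftrace(struct kprobe *p)
{
	/* Unregister the ops once the last ftrace-based kprobe is gone. */
	if (--kprobe_ftrace_enabled == 0)
		unregister_ftrace_function(&kprobe_ftrace_ops);
	/* remove = 1 drops this address from the filter. */
	ftrace_set_filter_ip(&kprobe_ftrace_ops, (unsigned long)p->addr, 1, 0);
}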
Link: http://lkml.kernel.org/r/20120605102832.27845.63461.stgit@localhost.localdomain
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: Ananth N Mavinakayanahalli <ananth@in.ibm.com>
Cc: "Frank Ch. Eigler" <fche@redhat.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Signed-off-by: Masami Hiramatsu <masami.hiramatsu.pt@hitachi.com>
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
Diffstat (limited to 'include/linux/kprobes.h')
-rw-r--r--  include/linux/kprobes.h  27
1 file changed, 27 insertions, 0 deletions
diff --git a/include/linux/kprobes.h b/include/linux/kprobes.h
index b6e1f8c00577..aa0d05e852e3 100644
--- a/include/linux/kprobes.h
+++ b/include/linux/kprobes.h
@@ -38,6 +38,7 @@
 #include <linux/spinlock.h>
 #include <linux/rcupdate.h>
 #include <linux/mutex.h>
+#include <linux/ftrace.h>
 
 #ifdef CONFIG_KPROBES
 #include <asm/kprobes.h>
@@ -48,14 +49,26 @@
 #define KPROBE_REENTER		0x00000004
 #define KPROBE_HIT_SSDONE	0x00000008
 
+/*
+ * If function tracer is enabled and the arch supports full
+ * passing of pt_regs to function tracing, then kprobes can
+ * optimize on top of function tracing.
+ */
+#if defined(CONFIG_FUNCTION_TRACER) && defined(ARCH_SUPPORTS_FTRACE_SAVE_REGS) \
+	&& defined(ARCH_SUPPORTS_KPROBES_ON_FTRACE)
+# define KPROBES_CAN_USE_FTRACE
+#endif
+
 /* Attach to insert probes on any functions which should be ignored*/
 #define __kprobes	__attribute__((__section__(".kprobes.text")))
+
 #else /* CONFIG_KPROBES */
 typedef int kprobe_opcode_t;
 struct arch_specific_insn {
 	int dummy;
 };
 #define __kprobes
+
 #endif /* CONFIG_KPROBES */
 
 struct kprobe;
@@ -128,6 +141,7 @@ struct kprobe {
  * NOTE:
  * this flag is only for optimized_kprobe.
  */
+#define KPROBE_FLAG_FTRACE	8 /* probe is using ftrace */
 
 /* Has this kprobe gone ? */
 static inline int kprobe_gone(struct kprobe *p)
@@ -146,6 +160,13 @@ static inline int kprobe_optimized(struct kprobe *p)
 {
 	return p->flags & KPROBE_FLAG_OPTIMIZED;
 }
+
+/* Is this kprobe uses ftrace ? */
+static inline int kprobe_ftrace(struct kprobe *p)
+{
+	return p->flags & KPROBE_FLAG_FTRACE;
+}
+
 /*
  * Special probe type that uses setjmp-longjmp type tricks to resume
  * execution at a specified entry with a matching prototype corresponding
@@ -295,6 +316,12 @@ extern int proc_kprobes_optimization_handler(struct ctl_table *table,
 #endif
 
 #endif /* CONFIG_OPTPROBES */
+#ifdef KPROBES_CAN_USE_FTRACE
+extern void kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip,
+				  struct ftrace_ops *ops, struct pt_regs *regs);
+extern int arch_prepare_kprobe_ftrace(struct kprobe *p);
+#endif
+
 
 /* Get the kprobe at this addr (if any) - called with preemption disabled */
 struct kprobe *get_kprobe(void *addr);
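
From a probe author's point of view the optimization is transparent: a
plain kprobe whose address is a function entry (an mcount call site)
is switched to ftrace automatically, and kprobe_ftrace() from the
header above reports whether that happened. A minimal illustrative
module, with "do_fork" merely as an example symbol:

#include <linux/module.h>
#include <linux/kprobes.h>

/*
 * Minimal illustrative module: a probe on a function entry point lands
 * on the mcount call, so it is eligible for the ftrace-based
 * optimization and needs no breakpoint.
 */
static int handler_pre(struct kprobe *p, struct pt_regs *regs)
{
	pr_info("probe hit at %p (ftrace-based: %d)\n",
		p->addr, !!kprobe_ftrace(p));
	return 0;	/* let execution continue normally */
}

static struct kprobe kp = {
	.symbol_name	= "do_fork",
	.pre_handler	= handler_pre,
};

static int __init ftrace_kprobe_example_init(void)
{
	return register_kprobe(&kp);
}

static void __exit ftrace_kprobe_example_exit(void)
{
	unregister_kprobe(&kp);
}

module_init(ftrace_kprobe_example_init);
module_exit(ftrace_kprobe_example_exit);
MODULE_LICENSE("GPL");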