aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorMasami Hiramatsu <mhiramat@kernel.org>2018-07-30 06:20:14 -0400
committerSteven Rostedt (VMware) <rostedt@goodmis.org>2018-07-30 18:28:52 -0400
commit45408c4f92506dbdfef1721f2613e1426de00894 (patch)
tree0deca678bf0954a7224acd62f4a805d8d6eb1788
parent518eeca05c08347f0a69966b4459cc2d152af959 (diff)
tracing: kprobes: Prohibit probing on notrace function
Prohibit kprobe-events probing on notrace functions. Since probing on a notrace function can cause a recursive event call. In most cases those are just skipped, but in some cases it falls into an infinite recursive call. This protection can be disabled by the kconfig CONFIG_KPROBE_EVENTS_ON_NOTRACE=y, but it is highly recommended to keep it "n" for normal kernel builds. Note that this is only available if "kprobes on ftrace" has been implemented on the target arch and CONFIG_KPROBES_ON_FTRACE=y. Link: http://lkml.kernel.org/r/153294601436.32740.10557881188933661239.stgit@devbox Signed-off-by: Masami Hiramatsu <mhiramat@kernel.org> Tested-by: Francis Deslauriers <francis.deslauriers@efficios.com> [ Slight grammar and spelling fixes ] Signed-off-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
-rw-r--r--kernel/trace/Kconfig20
-rw-r--r--kernel/trace/trace_kprobe.c47
2 files changed, 58 insertions, 9 deletions
diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig
index e15fadbe5dfb..4d4eb15cc7fd 100644
--- a/kernel/trace/Kconfig
+++ b/kernel/trace/Kconfig
@@ -456,6 +456,26 @@ config KPROBE_EVENTS
456 This option is also required by perf-probe subcommand of perf tools. 456 This option is also required by perf-probe subcommand of perf tools.
457 If you want to use perf tools, this option is strongly recommended. 457 If you want to use perf tools, this option is strongly recommended.
458 458
459config KPROBE_EVENTS_ON_NOTRACE
460 bool "Do NOT protect notrace function from kprobe events"
461 depends on KPROBE_EVENTS
462 depends on KPROBES_ON_FTRACE
463 default n
464 help
465 This is only for the developers who want to debug ftrace itself
466 using kprobe events.
467
468 If kprobes can use ftrace instead of breakpoint, ftrace related
 469 functions are protected from kprobe-events to prevent an infinite
 470 recursion or any unexpected execution path which leads to a kernel
471 crash.
472
473 This option disables such protection and allows you to put kprobe
474 events on ftrace functions for debugging ftrace by itself.
475 Note that this might let you shoot yourself in the foot.
476
477 If unsure, say N.
478
459config UPROBE_EVENTS 479config UPROBE_EVENTS
460 bool "Enable uprobes-based dynamic events" 480 bool "Enable uprobes-based dynamic events"
461 depends on ARCH_SUPPORTS_UPROBES 481 depends on ARCH_SUPPORTS_UPROBES
diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c
index 0534eb8b7640..25662a780fdf 100644
--- a/kernel/trace/trace_kprobe.c
+++ b/kernel/trace/trace_kprobe.c
@@ -87,6 +87,21 @@ static nokprobe_inline unsigned long trace_kprobe_nhit(struct trace_kprobe *tk)
87 return nhit; 87 return nhit;
88} 88}
89 89
90static nokprobe_inline
91unsigned long trace_kprobe_address(struct trace_kprobe *tk)
92{
93 unsigned long addr;
94
95 if (tk->symbol) {
96 addr = (unsigned long)
97 kallsyms_lookup_name(trace_kprobe_symbol(tk));
98 addr += tk->rp.kp.offset;
99 } else {
100 addr = (unsigned long)tk->rp.kp.addr;
101 }
102 return addr;
103}
104
90bool trace_kprobe_on_func_entry(struct trace_event_call *call) 105bool trace_kprobe_on_func_entry(struct trace_event_call *call)
91{ 106{
92 struct trace_kprobe *tk = (struct trace_kprobe *)call->data; 107 struct trace_kprobe *tk = (struct trace_kprobe *)call->data;
@@ -99,16 +114,8 @@ bool trace_kprobe_on_func_entry(struct trace_event_call *call)
99bool trace_kprobe_error_injectable(struct trace_event_call *call) 114bool trace_kprobe_error_injectable(struct trace_event_call *call)
100{ 115{
101 struct trace_kprobe *tk = (struct trace_kprobe *)call->data; 116 struct trace_kprobe *tk = (struct trace_kprobe *)call->data;
102 unsigned long addr;
103 117
104 if (tk->symbol) { 118 return within_error_injection_list(trace_kprobe_address(tk));
105 addr = (unsigned long)
106 kallsyms_lookup_name(trace_kprobe_symbol(tk));
107 addr += tk->rp.kp.offset;
108 } else {
109 addr = (unsigned long)tk->rp.kp.addr;
110 }
111 return within_error_injection_list(addr);
112} 119}
113 120
114static int register_kprobe_event(struct trace_kprobe *tk); 121static int register_kprobe_event(struct trace_kprobe *tk);
@@ -504,6 +511,22 @@ disable_trace_kprobe(struct trace_kprobe *tk, struct trace_event_file *file)
504 return ret; 511 return ret;
505} 512}
506 513
514#if defined(CONFIG_KPROBES_ON_FTRACE) && \
515 !defined(CONFIG_KPROBE_EVENTS_ON_NOTRACE)
516static bool within_notrace_func(struct trace_kprobe *tk)
517{
518 unsigned long offset, size, addr;
519
520 addr = trace_kprobe_address(tk);
521 if (!kallsyms_lookup_size_offset(addr, &size, &offset))
522 return true; /* Out of range. */
523
524 return !ftrace_location_range(addr - offset, addr - offset + size);
525}
526#else
527#define within_notrace_func(tk) (false)
528#endif
529
507/* Internal register function - just handle k*probes and flags */ 530/* Internal register function - just handle k*probes and flags */
508static int __register_trace_kprobe(struct trace_kprobe *tk) 531static int __register_trace_kprobe(struct trace_kprobe *tk)
509{ 532{
@@ -512,6 +535,12 @@ static int __register_trace_kprobe(struct trace_kprobe *tk)
512 if (trace_probe_is_registered(&tk->tp)) 535 if (trace_probe_is_registered(&tk->tp))
513 return -EINVAL; 536 return -EINVAL;
514 537
538 if (within_notrace_func(tk)) {
539 pr_warn("Could not probe notrace function %s\n",
540 trace_kprobe_symbol(tk));
541 return -EINVAL;
542 }
543
515 for (i = 0; i < tk->tp.nr_args; i++) 544 for (i = 0; i < tk->tp.nr_args; i++)
516 traceprobe_update_arg(&tk->tp.args[i]); 545 traceprobe_update_arg(&tk->tp.args[i]);
517 546