-rw-r--r--	kernel/trace/bpf_trace.c	1
-rw-r--r--	kernel/trace/trace_kprobe.c	21
2 files changed, 7 insertions, 15 deletions
diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c
index 1966ad3bf3e0..24ed6363e00f 100644
--- a/kernel/trace/bpf_trace.c
+++ b/kernel/trace/bpf_trace.c
@@ -83,7 +83,6 @@ EXPORT_SYMBOL_GPL(trace_call_bpf);
 #ifdef CONFIG_BPF_KPROBE_OVERRIDE
 BPF_CALL_2(bpf_override_return, struct pt_regs *, regs, unsigned long, rc)
 {
-	__this_cpu_write(bpf_kprobe_override, 1);
 	regs_set_return_value(regs, rc);
 	arch_kprobe_override_function(regs);
 	return 0;
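
For reference, here is how the helper body reads once the per-cpu write is gone. This is a sketch assembled from the hunk above, with explanatory comments added; the x86 behaviour of arch_kprobe_override_function() is stated from memory, not from this diff:

#ifdef CONFIG_BPF_KPROBE_OVERRIDE
BPF_CALL_2(bpf_override_return, struct pt_regs *, regs, unsigned long, rc)
{
	/* Force the probed function's return value to rc... */
	regs_set_return_value(regs, rc);
	/*
	 * ...and rewrite the saved instruction pointer so the rest of the
	 * probed function is skipped (on x86 this pointed regs->ip at a
	 * stub that immediately returns).
	 */
	arch_kprobe_override_function(regs);
	return 0;
}
#endif

With the flag write dropped, the ip rewrite itself is the only remaining signal that an override happened, which is exactly what the trace_kprobe.c side now checks for.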
diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c
index 3c8deb977a8b..b8c90441bc87 100644
--- a/kernel/trace/trace_kprobe.c
+++ b/kernel/trace/trace_kprobe.c
@@ -42,8 +42,6 @@ struct trace_kprobe {
 	(offsetof(struct trace_kprobe, tp.args) +	\
 	(sizeof(struct probe_arg) * (n)))
 
-DEFINE_PER_CPU(int, bpf_kprobe_override);
-
 static nokprobe_inline bool trace_kprobe_is_return(struct trace_kprobe *tk)
 {
 	return tk->rp.handler != NULL;
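
The DEFINE_PER_CPU line deleted here declared the flag that bpf_override_return() used to set. As a purely illustrative sketch of the handshake being retired (the names below are hypothetical, not from the tree):

/* Hypothetical sketch of the per-cpu flag handshake this patch removes. */
DEFINE_PER_CPU(int, override_flag);

static void signal_override(void)		/* the bpf_override_return() side */
{
	__this_cpu_write(override_flag, 1);
}

static bool test_and_clear_override(void)	/* the kprobe_perf_func() side */
{
	if (!__this_cpu_read(override_flag))
		return false;
	__this_cpu_write(override_flag, 0);	/* reset for the next pass */
	return true;
}

The pattern is only safe because kprobe handlers run with preemption disabled, so both halves are guaranteed to execute on the same CPU; comparing the saved ip instead removes the state entirely, and with it the need to reset it on every pass.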
@@ -1205,6 +1203,7 @@ kprobe_perf_func(struct trace_kprobe *tk, struct pt_regs *regs)
 	int rctx;
 
 	if (bpf_prog_array_valid(call)) {
+		unsigned long orig_ip = instruction_pointer(regs);
 		int ret;
 
 		ret = trace_call_bpf(call, regs);
@@ -1212,12 +1211,13 @@ kprobe_perf_func(struct trace_kprobe *tk, struct pt_regs *regs)
 		/*
 		 * We need to check and see if we modified the pc of the
 		 * pt_regs, and if so clear the kprobe and return 1 so that we
-		 * don't do the instruction skipping. Also reset our state so
-		 * we are clean the next pass through.
+		 * don't do the single stepping.
+		 * The ftrace kprobe handler leaves it up to us to re-enable
+		 * preemption here before returning if we've modified the ip.
 		 */
-		if (__this_cpu_read(bpf_kprobe_override)) {
-			__this_cpu_write(bpf_kprobe_override, 0);
+		if (orig_ip != instruction_pointer(regs)) {
 			reset_current_kprobe();
+			preempt_enable_no_resched();
 			return 1;
 		}
 		if (!ret)
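
Taken together with the previous hunk, the BPF branch of kprobe_perf_func() now reads roughly as below. The sketch is assembled from the two hunks; the final return is inferred from surrounding context rather than shown in the diff:

	if (bpf_prog_array_valid(call)) {
		unsigned long orig_ip = instruction_pointer(regs);
		int ret;

		ret = trace_call_bpf(call, regs);

		/* A changed ip means a program called bpf_override_return(). */
		if (orig_ip != instruction_pointer(regs)) {
			reset_current_kprobe();
			preempt_enable_no_resched();
			return 1;	/* tell the kprobe core to skip single stepping */
		}
		if (!ret)
			return 0;	/* inferred: the program filtered this event out */
	}

instruction_pointer() is the arch-neutral accessor for the saved pc/ip in pt_regs, so the before/after comparison needs no per-architecture support beyond what the override stub already provides.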
@@ -1325,15 +1325,8 @@ static int kprobe_dispatcher(struct kprobe *kp, struct pt_regs *regs)
 	if (tk->tp.flags & TP_FLAG_TRACE)
 		kprobe_trace_func(tk, regs);
 #ifdef CONFIG_PERF_EVENTS
-	if (tk->tp.flags & TP_FLAG_PROFILE) {
+	if (tk->tp.flags & TP_FLAG_PROFILE)
 		ret = kprobe_perf_func(tk, regs);
-		/*
-		 * The ftrace kprobe handler leaves it up to us to re-enable
-		 * preemption here before returning if we've modified the ip.
-		 */
-		if (ret)
-			preempt_enable_no_resched();
-	}
 #endif
 	return ret;
 }
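
For comparison, the simplified dispatcher after this hunk; the function header and locals are reconstructed from context, and the hit-count accounting is elided:

static int kprobe_dispatcher(struct kprobe *kp, struct pt_regs *regs)
{
	struct trace_kprobe *tk = container_of(kp, struct trace_kprobe, rp.kp);
	int ret = 0;

	/* (hit-count accounting elided) */
	if (tk->tp.flags & TP_FLAG_TRACE)
		kprobe_trace_func(tk, regs);
#ifdef CONFIG_PERF_EVENTS
	if (tk->tp.flags & TP_FLAG_PROFILE)
		ret = kprobe_perf_func(tk, regs);
#endif
	return ret;
}

Re-enabling preemption right next to the ip check keeps the unbalanced-preemption special case confined to kprobe_perf_func(), instead of splitting the decision (a non-zero return) and the action (preempt_enable_no_resched()) across two functions as before.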