about summary refs log tree commit diff stats
path: root/kernel/trace/trace_kprobe.c
diff options
context:
space:
mode:
Diffstat (limited to 'kernel/trace/trace_kprobe.c')
-rw-r--r--  kernel/trace/trace_kprobe.c  50
1 file changed, 32 insertions, 18 deletions
diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c
index 7ed6976493c8..243f6834d026 100644
--- a/kernel/trace/trace_kprobe.c
+++ b/kernel/trace/trace_kprobe.c
@@ -95,7 +95,7 @@ static __kprobes bool trace_probe_is_on_module(struct trace_probe *tp)
95} 95}
96 96
97static int register_probe_event(struct trace_probe *tp); 97static int register_probe_event(struct trace_probe *tp);
98static void unregister_probe_event(struct trace_probe *tp); 98static int unregister_probe_event(struct trace_probe *tp);
99 99
100static DEFINE_MUTEX(probe_lock); 100static DEFINE_MUTEX(probe_lock);
101static LIST_HEAD(probe_list); 101static LIST_HEAD(probe_list);
@@ -243,11 +243,11 @@ find_event_file_link(struct trace_probe *tp, struct ftrace_event_file *file)
243static int 243static int
244disable_trace_probe(struct trace_probe *tp, struct ftrace_event_file *file) 244disable_trace_probe(struct trace_probe *tp, struct ftrace_event_file *file)
245{ 245{
246 struct event_file_link *link = NULL;
247 int wait = 0;
246 int ret = 0; 248 int ret = 0;
247 249
248 if (file) { 250 if (file) {
249 struct event_file_link *link;
250
251 link = find_event_file_link(tp, file); 251 link = find_event_file_link(tp, file);
252 if (!link) { 252 if (!link) {
253 ret = -EINVAL; 253 ret = -EINVAL;
@@ -255,10 +255,7 @@ disable_trace_probe(struct trace_probe *tp, struct ftrace_event_file *file)
255 } 255 }
256 256
257 list_del_rcu(&link->list); 257 list_del_rcu(&link->list);
258 /* synchronize with kprobe_trace_func/kretprobe_trace_func */ 258 wait = 1;
259 synchronize_sched();
260 kfree(link);
261
262 if (!list_empty(&tp->files)) 259 if (!list_empty(&tp->files))
263 goto out; 260 goto out;
264 261
@@ -271,8 +268,22 @@ disable_trace_probe(struct trace_probe *tp, struct ftrace_event_file *file)
271 disable_kretprobe(&tp->rp); 268 disable_kretprobe(&tp->rp);
272 else 269 else
273 disable_kprobe(&tp->rp.kp); 270 disable_kprobe(&tp->rp.kp);
271 wait = 1;
274 } 272 }
275 out: 273 out:
274 if (wait) {
275 /*
276 * Synchronize with kprobe_trace_func/kretprobe_trace_func
277 * to ensure disabled (all running handlers are finished).
278 * This is not only for kfree(), but also the caller,
279 * trace_remove_event_call() supposes it for releasing
280 * event_call related objects, which will be accessed in
281 * the kprobe_trace_func/kretprobe_trace_func.
282 */
283 synchronize_sched();
284 kfree(link); /* Ignored if link == NULL */
285 }
286
276 return ret; 287 return ret;
277} 288}
278 289
@@ -340,9 +351,12 @@ static int unregister_trace_probe(struct trace_probe *tp)
340 if (trace_probe_is_enabled(tp)) 351 if (trace_probe_is_enabled(tp))
341 return -EBUSY; 352 return -EBUSY;
342 353
354 /* Will fail if probe is being used by ftrace or perf */
355 if (unregister_probe_event(tp))
356 return -EBUSY;
357
343 __unregister_trace_probe(tp); 358 __unregister_trace_probe(tp);
344 list_del(&tp->list); 359 list_del(&tp->list);
345 unregister_probe_event(tp);
346 360
347 return 0; 361 return 0;
348} 362}
@@ -621,7 +635,9 @@ static int release_all_trace_probes(void)
621 /* TODO: Use batch unregistration */ 635 /* TODO: Use batch unregistration */
622 while (!list_empty(&probe_list)) { 636 while (!list_empty(&probe_list)) {
623 tp = list_entry(probe_list.next, struct trace_probe, list); 637 tp = list_entry(probe_list.next, struct trace_probe, list);
624 unregister_trace_probe(tp); 638 ret = unregister_trace_probe(tp);
639 if (ret)
640 goto end;
625 free_trace_probe(tp); 641 free_trace_probe(tp);
626 } 642 }
627 643
@@ -1087,9 +1103,6 @@ kprobe_perf_func(struct trace_probe *tp, struct pt_regs *regs)
1087 __size = sizeof(*entry) + tp->size + dsize; 1103 __size = sizeof(*entry) + tp->size + dsize;
1088 size = ALIGN(__size + sizeof(u32), sizeof(u64)); 1104 size = ALIGN(__size + sizeof(u32), sizeof(u64));
1089 size -= sizeof(u32); 1105 size -= sizeof(u32);
1090 if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE,
1091 "profile buffer not large enough"))
1092 return;
1093 1106
1094 entry = perf_trace_buf_prepare(size, call->event.type, regs, &rctx); 1107 entry = perf_trace_buf_prepare(size, call->event.type, regs, &rctx);
1095 if (!entry) 1108 if (!entry)
@@ -1120,9 +1133,6 @@ kretprobe_perf_func(struct trace_probe *tp, struct kretprobe_instance *ri,
1120 __size = sizeof(*entry) + tp->size + dsize; 1133 __size = sizeof(*entry) + tp->size + dsize;
1121 size = ALIGN(__size + sizeof(u32), sizeof(u64)); 1134 size = ALIGN(__size + sizeof(u32), sizeof(u64));
1122 size -= sizeof(u32); 1135 size -= sizeof(u32);
1123 if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE,
1124 "profile buffer not large enough"))
1125 return;
1126 1136
1127 entry = perf_trace_buf_prepare(size, call->event.type, regs, &rctx); 1137 entry = perf_trace_buf_prepare(size, call->event.type, regs, &rctx);
1128 if (!entry) 1138 if (!entry)
@@ -1242,11 +1252,15 @@ static int register_probe_event(struct trace_probe *tp)
1242 return ret; 1252 return ret;
1243} 1253}
1244 1254
1245static void unregister_probe_event(struct trace_probe *tp) 1255static int unregister_probe_event(struct trace_probe *tp)
1246{ 1256{
1257 int ret;
1258
1247 /* tp->event is unregistered in trace_remove_event_call() */ 1259 /* tp->event is unregistered in trace_remove_event_call() */
1248 trace_remove_event_call(&tp->call); 1260 ret = trace_remove_event_call(&tp->call);
1249 kfree(tp->call.print_fmt); 1261 if (!ret)
1262 kfree(tp->call.print_fmt);
1263 return ret;
1250} 1264}
1251 1265
1252/* Make a debugfs interface for controlling probe points */ 1266/* Make a debugfs interface for controlling probe points */