commit     3fe3d6193e7cd7b4dd2bde10772f048bdefea4ee
parent     288e984e622336bab8bc3dfdf2f190816362d9a1
tree       f7623a40ffb23ffab302a8cb9ed6c5716ec1d691 /kernel/trace
author     Oleg Nesterov <oleg@redhat.com>        2013-06-20 13:38:09 -0400
committer  Steven Rostedt <rostedt@goodmis.org>   2013-07-01 20:34:24 -0400
tracing/kprobes: Kill probe_enable_lock
enable_trace_probe() and disable_trace_probe() should not worry about
serialization; the caller (perf_trace_init or __ftrace_set_clr_event)
already holds event_mutex.
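The convention being relied on is simply "lock in the caller, not in the
callee". A minimal userspace sketch of that shape, with hypothetical names
and a pthread mutex standing in for event_mutex (illustration only, not the
kernel code): every entry point funnels through the same mutex, so the
enable/disable helpers themselves stay lock-free.

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t event_mutex = PTHREAD_MUTEX_INITIALIZER;
static unsigned int probe_flags;        /* stands in for tp->flags */

/* analogue of enable_trace_probe(): no locking, assumes the caller holds event_mutex */
static void probe_enable(unsigned int bit)
{
        probe_flags |= bit;             /* plain read-modify-write */
}

/* analogue of __ftrace_set_clr_event()/perf_trace_init(): the one place that takes the lock */
static void set_clr_event(unsigned int bit)
{
        pthread_mutex_lock(&event_mutex);
        probe_enable(bit);
        pthread_mutex_unlock(&event_mutex);
}

int main(void)
{
        set_clr_event(0x1);
        printf("flags = %#x\n", probe_flags);
        return 0;
}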
They are also called by kprobe_trace_self_tests_init(), but this __init
function cannot race with itself or with the trace_events.c paths.
Note also that this code depended on event_mutex even before 41a7dd420c,
which introduced probe_enable_lock. In fact, it assumes that the caller,
kprobe_register(), can never race with itself; otherwise the tp->flags
manipulations, for example, would be racy.
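The race being ruled out is the usual lost update on a non-atomic
read-modify-write. A self-contained demo of that failure mode (plain
userspace C, nothing kernel-specific; a counter stands in for the shared
field): with use_lock set the result is exact, with it cleared two
concurrent updaters routinely overwrite each other's stores. Build with
"gcc -O2 -pthread demo.c".

#include <pthread.h>
#include <stdio.h>

#define ITERS   1000000

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned long word;              /* stands in for a shared flags/count field */
static int use_lock = 1;                /* flip to 0 to watch updates get lost */

static void *updater(void *arg)
{
        for (int i = 0; i < ITERS; i++) {
                if (use_lock)
                        pthread_mutex_lock(&lock);
                word++;                 /* non-atomic load, modify, store */
                if (use_lock)
                        pthread_mutex_unlock(&lock);
        }
        return NULL;
}

int main(void)
{
        pthread_t a, b;

        pthread_create(&a, NULL, updater, NULL);
        pthread_create(&b, NULL, updater, NULL);
        pthread_join(a, NULL);
        pthread_join(b, NULL);
        printf("word = %lu (expected %d)\n", word, 2 * ITERS);
        return 0;
}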
Link: http://lkml.kernel.org/r/20130620173809.GA13158@redhat.com
Acked-by: Masami Hiramatsu <masami.hiramatsu.pt@hitachi.com>
Signed-off-by: Oleg Nesterov <oleg@redhat.com>
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
Diffstat (limited to 'kernel/trace')
 -rw-r--r--  kernel/trace/trace_kprobe.c | 43
 1 file changed, 20 insertions(+), 23 deletions(-)
diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c
index c35bebe53ffe..282f86cfd304 100644
--- a/kernel/trace/trace_kprobe.c
+++ b/kernel/trace/trace_kprobe.c
@@ -183,16 +183,15 @@ static struct trace_probe *find_trace_probe(const char *event,
         return NULL;
 }
 
+/*
+ * This and enable_trace_probe/disable_trace_probe rely on event_mutex
+ * held by the caller, __ftrace_set_clr_event().
+ */
 static int trace_probe_nr_files(struct trace_probe *tp)
 {
-        struct ftrace_event_file **file;
+        struct ftrace_event_file **file = rcu_dereference_raw(tp->files);
         int ret = 0;
 
-        /*
-         * Since all tp->files updater is protected by probe_enable_lock,
-         * we don't need to lock an rcu_read_lock.
-         */
-        file = rcu_dereference_raw(tp->files);
         if (file)
                 while (*(file++))
                         ret++;
@@ -200,8 +199,6 @@ static int trace_probe_nr_files(struct trace_probe *tp)
         return ret;
 }
 
-static DEFINE_MUTEX(probe_enable_lock);
-
 /*
  * Enable trace_probe
  * if the file is NULL, enable "perf" handler, or enable "trace" handler.
@@ -211,8 +208,6 @@ enable_trace_probe(struct trace_probe *tp, struct ftrace_event_file *file)
 {
         int ret = 0;
 
-        mutex_lock(&probe_enable_lock);
-
         if (file) {
                 struct ftrace_event_file **new, **old;
                 int n = trace_probe_nr_files(tp);
@@ -223,7 +218,7 @@ enable_trace_probe(struct trace_probe *tp, struct ftrace_event_file *file)
                               GFP_KERNEL);
                 if (!new) {
                         ret = -ENOMEM;
-                        goto out_unlock;
+                        goto out;
                 }
                 memcpy(new, old, n * sizeof(struct ftrace_event_file *));
                 new[n] = file;
@@ -246,10 +241,7 @@ enable_trace_probe(struct trace_probe *tp, struct ftrace_event_file *file)
                 else
                         ret = enable_kprobe(&tp->rp.kp);
         }
-
- out_unlock:
-        mutex_unlock(&probe_enable_lock);
-
+ out:
         return ret;
 }
 
@@ -282,8 +274,6 @@ disable_trace_probe(struct trace_probe *tp, struct ftrace_event_file *file)
 {
         int ret = 0;
 
-        mutex_lock(&probe_enable_lock);
-
         if (file) {
                 struct ftrace_event_file **new, **old;
                 int n = trace_probe_nr_files(tp);
@@ -292,7 +282,7 @@ disable_trace_probe(struct trace_probe *tp, struct ftrace_event_file *file)
                 old = rcu_dereference_raw(tp->files);
                 if (n == 0 || trace_probe_file_index(tp, file) < 0) {
                         ret = -EINVAL;
-                        goto out_unlock;
+                        goto out;
                 }
 
                 if (n == 1) {   /* Remove the last file */
@@ -303,7 +293,7 @@ disable_trace_probe(struct trace_probe *tp, struct ftrace_event_file *file)
                                       GFP_KERNEL);
                         if (!new) {
                                 ret = -ENOMEM;
-                                goto out_unlock;
+                                goto out;
                         }
 
                         /* This copy & check loop copies the NULL stopper too */
@@ -326,10 +316,7 @@ disable_trace_probe(struct trace_probe *tp, struct ftrace_event_file *file)
                 else
                         disable_kprobe(&tp->rp.kp);
         }
-
- out_unlock:
-        mutex_unlock(&probe_enable_lock);
-
+ out:
         return ret;
 }
 
@@ -1214,6 +1201,12 @@ kretprobe_perf_func(struct trace_probe *tp, struct kretprobe_instance *ri,
 }
 #endif  /* CONFIG_PERF_EVENTS */
 
+/*
+ * called by perf_trace_init() or __ftrace_set_clr_event() under event_mutex.
+ *
+ * kprobe_trace_self_tests_init() does enable_trace_probe/disable_trace_probe
+ * lockless, but we can't race with this __init function.
+ */
 static __kprobes
 int kprobe_register(struct ftrace_event_call *event,
                     enum trace_reg type, void *data)
@@ -1379,6 +1372,10 @@ find_trace_probe_file(struct trace_probe *tp, struct trace_array *tr)
         return NULL;
 }
 
+/*
+ * Nobody but us can call enable_trace_probe/disable_trace_probe at this
+ * stage, we can do this lockless.
+ */
 static __init int kprobe_trace_self_tests_init(void)
 {
         int ret, warn = 0;