author     Dmitry Torokhov <dmitry.torokhov@gmail.com>   2015-02-10 14:35:36 -0500
committer  Dmitry Torokhov <dmitry.torokhov@gmail.com>   2015-02-10 14:35:36 -0500
commit     4ba24fef3eb3b142197135223b90ced2f319cd53 (patch)
tree       a20c125b27740ec7b4c761b11d801108e1b316b2 /kernel/trace/trace_functions.c
parent     47c1ffb2b6b630894e9a16442611c056ab21c057 (diff)
parent     98a4a59ee31a12105a2b84f5b8b515ac2cb208ef (diff)
Merge branch 'next' into for-linus
Prepare first round of input updates for 3.20.
Diffstat (limited to 'kernel/trace/trace_functions.c')
-rw-r--r--  kernel/trace/trace_functions.c  119
1 file changed, 97 insertions, 22 deletions
diff --git a/kernel/trace/trace_functions.c b/kernel/trace/trace_functions.c
index 57f0ec962d2c..fcd41a166405 100644
--- a/kernel/trace/trace_functions.c
+++ b/kernel/trace/trace_functions.c
@@ -261,37 +261,74 @@ static struct tracer function_trace __tracer_data =
 };
 
 #ifdef CONFIG_DYNAMIC_FTRACE
-static int update_count(void **data)
+static void update_traceon_count(void **data, bool on)
 {
-	unsigned long *count = (long *)data;
+	long *count = (long *)data;
+	long old_count = *count;
 
-	if (!*count)
-		return 0;
+	/*
+	 * Tracing gets disabled (or enabled) once per count.
+	 * This function can be called at the same time on multiple CPUs.
+	 * It is fine if both disable (or enable) tracing, as disabling
+	 * (or enabling) the second time doesn't do anything as the
+	 * state of the tracer is already disabled (or enabled).
+	 * What needs to be synchronized in this case is that the count
+	 * only gets decremented once, even if the tracer is disabled
+	 * (or enabled) twice, as the second one is really a nop.
+	 *
+	 * The memory barriers guarantee that we only decrement the
+	 * counter once. First the count is read to a local variable
+	 * and a read barrier is used to make sure that it is loaded
+	 * before checking if the tracer is in the state we want.
+	 * If the tracer is not in the state we want, then the count
+	 * is guaranteed to be the old count.
+	 *
+	 * Next the tracer is set to the state we want (disabled or enabled)
+	 * then a write memory barrier is used to make sure that
+	 * the new state is visible before changing the counter by
+	 * one minus the old counter. This guarantees that another CPU
+	 * executing this code will see the new state before seeing
+	 * the new counter value, and would not do anything if the new
+	 * counter is seen.
+	 *
+	 * Note, there is no synchronization between this and a user
+	 * setting the tracing_on file. But we currently don't care
+	 * about that.
+	 */
+	if (!old_count)
+		return;
 
-	if (*count != -1)
-		(*count)--;
+	/* Make sure we see count before checking tracing state */
+	smp_rmb();
 
-	return 1;
+	if (on == !!tracing_is_on())
+		return;
+
+	if (on)
+		tracing_on();
+	else
+		tracing_off();
+
+	/* unlimited? */
+	if (old_count == -1)
+		return;
+
+	/* Make sure tracing state is visible before updating count */
+	smp_wmb();
+
+	*count = old_count - 1;
 }
 
 static void
 ftrace_traceon_count(unsigned long ip, unsigned long parent_ip, void **data)
 {
-	if (tracing_is_on())
-		return;
-
-	if (update_count(data))
-		tracing_on();
+	update_traceon_count(data, 1);
 }
 
 static void
 ftrace_traceoff_count(unsigned long ip, unsigned long parent_ip, void **data)
 {
-	if (!tracing_is_on())
-		return;
-
-	if (update_count(data))
-		tracing_off();
+	update_traceon_count(data, 0);
 }
 
 static void
@@ -330,11 +367,49 @@ ftrace_stacktrace(unsigned long ip, unsigned long parent_ip, void **data)
 static void
 ftrace_stacktrace_count(unsigned long ip, unsigned long parent_ip, void **data)
 {
-	if (!tracing_is_on())
-		return;
+	long *count = (long *)data;
+	long old_count;
+	long new_count;
 
-	if (update_count(data))
-		trace_dump_stack(STACK_SKIP);
+	/*
+	 * Stack traces should only execute the number of times the
+	 * user specified in the counter.
+	 */
+	do {
+
+		if (!tracing_is_on())
+			return;
+
+		old_count = *count;
+
+		if (!old_count)
+			return;
+
+		/* unlimited? */
+		if (old_count == -1) {
+			trace_dump_stack(STACK_SKIP);
+			return;
+		}
+
+		new_count = old_count - 1;
+		new_count = cmpxchg(count, old_count, new_count);
+		if (new_count == old_count)
+			trace_dump_stack(STACK_SKIP);
+
+	} while (new_count != old_count);
+}
+
+static int update_count(void **data)
+{
+	unsigned long *count = (long *)data;
+
+	if (!*count)
+		return 0;
+
+	if (*count != -1)
+		(*count)--;
+
+	return 1;
 }
 
 static void
@@ -361,7 +436,7 @@ ftrace_probe_print(const char *name, struct seq_file *m,
 	seq_printf(m, "%ps:%s", (void *)ip, name);
 
 	if (count == -1)
-		seq_printf(m, ":unlimited\n");
+		seq_puts(m, ":unlimited\n");
 	else
 		seq_printf(m, ":count=%ld\n", count);
 