Diffstat (limited to 'kernel/trace/trace_functions.c')
-rw-r--r--  kernel/trace/trace_functions.c  227
1 file changed, 155 insertions(+), 72 deletions(-)
diff --git a/kernel/trace/trace_functions.c b/kernel/trace/trace_functions.c
index 0efa00d80623..a3bddbfd0874 100644
--- a/kernel/trace/trace_functions.c
+++ b/kernel/trace/trace_functions.c
@@ -267,10 +267,14 @@ static struct tracer function_trace __tracer_data =
 };
 
 #ifdef CONFIG_DYNAMIC_FTRACE
-static void update_traceon_count(void **data, bool on)
+static void update_traceon_count(struct ftrace_probe_ops *ops,
+                                 unsigned long ip,
+                                 struct trace_array *tr, bool on,
+                                 void *data)
 {
-        long *count = (long *)data;
-        long old_count = *count;
+        struct ftrace_func_mapper *mapper = data;
+        long *count;
+        long old_count;
 
         /*
          * Tracing gets disabled (or enabled) once per count.
@@ -301,23 +305,22 @@ static void update_traceon_count(void **data, bool on)
          * setting the tracing_on file. But we currently don't care
          * about that.
          */
-        if (!old_count)
+        count = (long *)ftrace_func_mapper_find_ip(mapper, ip);
+        old_count = *count;
+
+        if (old_count <= 0)
                 return;
 
         /* Make sure we see count before checking tracing state */
         smp_rmb();
 
-        if (on == !!tracing_is_on())
+        if (on == !!tracer_tracing_is_on(tr))
                 return;
 
         if (on)
-                tracing_on();
+                tracer_tracing_on(tr);
         else
-                tracing_off();
-
-        /* unlimited? */
-        if (old_count == -1)
-                return;
+                tracer_tracing_off(tr);
 
         /* Make sure tracing state is visible before updating count */
         smp_wmb();
@@ -326,33 +329,41 @@ static void update_traceon_count(void **data, bool on)
 }
 
 static void
-ftrace_traceon_count(unsigned long ip, unsigned long parent_ip, void **data)
+ftrace_traceon_count(unsigned long ip, unsigned long parent_ip,
+                     struct trace_array *tr, struct ftrace_probe_ops *ops,
+                     void *data)
 {
-        update_traceon_count(data, 1);
+        update_traceon_count(ops, ip, tr, 1, data);
 }
 
 static void
-ftrace_traceoff_count(unsigned long ip, unsigned long parent_ip, void **data)
+ftrace_traceoff_count(unsigned long ip, unsigned long parent_ip,
+                      struct trace_array *tr, struct ftrace_probe_ops *ops,
+                      void *data)
 {
-        update_traceon_count(data, 0);
+        update_traceon_count(ops, ip, tr, 0, data);
 }
 
 static void
-ftrace_traceon(unsigned long ip, unsigned long parent_ip, void **data)
+ftrace_traceon(unsigned long ip, unsigned long parent_ip,
+               struct trace_array *tr, struct ftrace_probe_ops *ops,
+               void *data)
 {
-        if (tracing_is_on())
+        if (tracer_tracing_is_on(tr))
                 return;
 
-        tracing_on();
+        tracer_tracing_on(tr);
 }
 
 static void
-ftrace_traceoff(unsigned long ip, unsigned long parent_ip, void **data)
+ftrace_traceoff(unsigned long ip, unsigned long parent_ip,
+                struct trace_array *tr, struct ftrace_probe_ops *ops,
+                void *data)
 {
-        if (!tracing_is_on())
+        if (!tracer_tracing_is_on(tr))
                 return;
 
-        tracing_off();
+        tracer_tracing_off(tr);
 }
 
 /*
@@ -364,144 +375,218 @@ ftrace_traceoff(unsigned long ip, unsigned long parent_ip, void **data)
  */
 #define STACK_SKIP 4
 
+static __always_inline void trace_stack(struct trace_array *tr)
+{
+        unsigned long flags;
+        int pc;
+
+        local_save_flags(flags);
+        pc = preempt_count();
+
+        __trace_stack(tr, flags, STACK_SKIP, pc);
+}
+
 static void
-ftrace_stacktrace(unsigned long ip, unsigned long parent_ip, void **data)
+ftrace_stacktrace(unsigned long ip, unsigned long parent_ip,
+                  struct trace_array *tr, struct ftrace_probe_ops *ops,
+                  void *data)
 {
-        trace_dump_stack(STACK_SKIP);
+        trace_stack(tr);
 }
 
 static void
-ftrace_stacktrace_count(unsigned long ip, unsigned long parent_ip, void **data)
+ftrace_stacktrace_count(unsigned long ip, unsigned long parent_ip,
+                        struct trace_array *tr, struct ftrace_probe_ops *ops,
+                        void *data)
 {
-        long *count = (long *)data;
+        struct ftrace_func_mapper *mapper = data;
+        long *count;
         long old_count;
         long new_count;
 
+        if (!tracing_is_on())
+                return;
+
+        /* unlimited? */
+        if (!mapper) {
+                trace_stack(tr);
+                return;
+        }
+
+        count = (long *)ftrace_func_mapper_find_ip(mapper, ip);
+
         /*
          * Stack traces should only execute the number of times the
          * user specified in the counter.
          */
         do {
-
-                if (!tracing_is_on())
-                        return;
-
                 old_count = *count;
 
                 if (!old_count)
                         return;
 
-                /* unlimited? */
-                if (old_count == -1) {
-                        trace_dump_stack(STACK_SKIP);
-                        return;
-                }
-
                 new_count = old_count - 1;
                 new_count = cmpxchg(count, old_count, new_count);
                 if (new_count == old_count)
-                        trace_dump_stack(STACK_SKIP);
+                        trace_stack(tr);
+
+                if (!tracing_is_on())
+                        return;
 
         } while (new_count != old_count);
 }
 
-static int update_count(void **data)
+static int update_count(struct ftrace_probe_ops *ops, unsigned long ip,
+                        void *data)
 {
-        unsigned long *count = (long *)data;
+        struct ftrace_func_mapper *mapper = data;
+        long *count = NULL;
 
-        if (!*count)
-                return 0;
+        if (mapper)
+                count = (long *)ftrace_func_mapper_find_ip(mapper, ip);
 
-        if (*count != -1)
+        if (count) {
+                if (*count <= 0)
+                        return 0;
                 (*count)--;
+        }
 
         return 1;
 }
 
 static void
-ftrace_dump_probe(unsigned long ip, unsigned long parent_ip, void **data)
+ftrace_dump_probe(unsigned long ip, unsigned long parent_ip,
+                  struct trace_array *tr, struct ftrace_probe_ops *ops,
+                  void *data)
 {
-        if (update_count(data))
+        if (update_count(ops, ip, data))
                 ftrace_dump(DUMP_ALL);
 }
 
 /* Only dump the current CPU buffer. */
 static void
-ftrace_cpudump_probe(unsigned long ip, unsigned long parent_ip, void **data)
+ftrace_cpudump_probe(unsigned long ip, unsigned long parent_ip,
+                     struct trace_array *tr, struct ftrace_probe_ops *ops,
+                     void *data)
 {
-        if (update_count(data))
+        if (update_count(ops, ip, data))
                 ftrace_dump(DUMP_ORIG);
 }
 
 static int
 ftrace_probe_print(const char *name, struct seq_file *m,
-                   unsigned long ip, void *data)
+                   unsigned long ip, struct ftrace_probe_ops *ops,
+                   void *data)
 {
-        long count = (long)data;
+        struct ftrace_func_mapper *mapper = data;
+        long *count = NULL;
 
         seq_printf(m, "%ps:%s", (void *)ip, name);
 
-        if (count == -1)
-                seq_puts(m, ":unlimited\n");
+        if (mapper)
+                count = (long *)ftrace_func_mapper_find_ip(mapper, ip);
+
+        if (count)
+                seq_printf(m, ":count=%ld\n", *count);
         else
-                seq_printf(m, ":count=%ld\n", count);
+                seq_puts(m, ":unlimited\n");
 
         return 0;
 }
 
 static int
 ftrace_traceon_print(struct seq_file *m, unsigned long ip,
-                     struct ftrace_probe_ops *ops, void *data)
+                     struct ftrace_probe_ops *ops,
+                     void *data)
 {
-        return ftrace_probe_print("traceon", m, ip, data);
+        return ftrace_probe_print("traceon", m, ip, ops, data);
 }
 
 static int
 ftrace_traceoff_print(struct seq_file *m, unsigned long ip,
                       struct ftrace_probe_ops *ops, void *data)
 {
-        return ftrace_probe_print("traceoff", m, ip, data);
+        return ftrace_probe_print("traceoff", m, ip, ops, data);
 }
 
 static int
 ftrace_stacktrace_print(struct seq_file *m, unsigned long ip,
                         struct ftrace_probe_ops *ops, void *data)
 {
-        return ftrace_probe_print("stacktrace", m, ip, data);
+        return ftrace_probe_print("stacktrace", m, ip, ops, data);
 }
 
 static int
 ftrace_dump_print(struct seq_file *m, unsigned long ip,
                   struct ftrace_probe_ops *ops, void *data)
 {
-        return ftrace_probe_print("dump", m, ip, data);
+        return ftrace_probe_print("dump", m, ip, ops, data);
 }
 
 static int
 ftrace_cpudump_print(struct seq_file *m, unsigned long ip,
                      struct ftrace_probe_ops *ops, void *data)
 {
-        return ftrace_probe_print("cpudump", m, ip, data);
+        return ftrace_probe_print("cpudump", m, ip, ops, data);
+}
+
+
+static int
+ftrace_count_init(struct ftrace_probe_ops *ops, struct trace_array *tr,
+                  unsigned long ip, void *init_data, void **data)
+{
+        struct ftrace_func_mapper *mapper = *data;
+
+        if (!mapper) {
+                mapper = allocate_ftrace_func_mapper();
+                if (!mapper)
+                        return -ENOMEM;
+                *data = mapper;
+        }
+
+        return ftrace_func_mapper_add_ip(mapper, ip, init_data);
+}
+
+static void
+ftrace_count_free(struct ftrace_probe_ops *ops, struct trace_array *tr,
+                  unsigned long ip, void *data)
+{
+        struct ftrace_func_mapper *mapper = data;
+
+        if (!ip) {
+                free_ftrace_func_mapper(mapper, NULL);
+                return;
+        }
+
+        ftrace_func_mapper_remove_ip(mapper, ip);
 }
 
 static struct ftrace_probe_ops traceon_count_probe_ops = {
         .func = ftrace_traceon_count,
         .print = ftrace_traceon_print,
+        .init = ftrace_count_init,
+        .free = ftrace_count_free,
 };
 
 static struct ftrace_probe_ops traceoff_count_probe_ops = {
         .func = ftrace_traceoff_count,
         .print = ftrace_traceoff_print,
+        .init = ftrace_count_init,
+        .free = ftrace_count_free,
 };
 
 static struct ftrace_probe_ops stacktrace_count_probe_ops = {
         .func = ftrace_stacktrace_count,
         .print = ftrace_stacktrace_print,
+        .init = ftrace_count_init,
+        .free = ftrace_count_free,
 };
 
 static struct ftrace_probe_ops dump_probe_ops = {
         .func = ftrace_dump_probe,
         .print = ftrace_dump_print,
+        .init = ftrace_count_init,
+        .free = ftrace_count_free,
 };
 
 static struct ftrace_probe_ops cpudump_probe_ops = {
@@ -525,7 +610,8 @@ static struct ftrace_probe_ops stacktrace_probe_ops = {
 };
 
 static int
-ftrace_trace_probe_callback(struct ftrace_probe_ops *ops,
+ftrace_trace_probe_callback(struct trace_array *tr,
+                            struct ftrace_probe_ops *ops,
                             struct ftrace_hash *hash, char *glob,
                             char *cmd, char *param, int enable)
 {
@@ -537,10 +623,8 @@ ftrace_trace_probe_callback(struct ftrace_probe_ops *ops,
         if (!enable)
                 return -EINVAL;
 
-        if (glob[0] == '!') {
-                unregister_ftrace_function_probe_func(glob+1, ops);
-                return 0;
-        }
+        if (glob[0] == '!')
+                return unregister_ftrace_function_probe_func(glob+1, tr, ops);
 
         if (!param)
                 goto out_reg;
@@ -559,13 +643,13 @@ ftrace_trace_probe_callback(struct ftrace_probe_ops *ops,
                 return ret;
 
  out_reg:
-        ret = register_ftrace_function_probe(glob, ops, count);
+        ret = register_ftrace_function_probe(glob, tr, ops, count);
 
         return ret < 0 ? ret : 0;
 }
 
 static int
-ftrace_trace_onoff_callback(struct ftrace_hash *hash,
+ftrace_trace_onoff_callback(struct trace_array *tr, struct ftrace_hash *hash,
                             char *glob, char *cmd, char *param, int enable)
 {
         struct ftrace_probe_ops *ops;
@@ -576,24 +660,24 @@ ftrace_trace_onoff_callback(struct ftrace_hash *hash,
         else
                 ops = param ? &traceoff_count_probe_ops : &traceoff_probe_ops;
 
-        return ftrace_trace_probe_callback(ops, hash, glob, cmd,
+        return ftrace_trace_probe_callback(tr, ops, hash, glob, cmd,
                                            param, enable);
 }
 
 static int
-ftrace_stacktrace_callback(struct ftrace_hash *hash,
+ftrace_stacktrace_callback(struct trace_array *tr, struct ftrace_hash *hash,
                            char *glob, char *cmd, char *param, int enable)
 {
         struct ftrace_probe_ops *ops;
 
         ops = param ? &stacktrace_count_probe_ops : &stacktrace_probe_ops;
 
-        return ftrace_trace_probe_callback(ops, hash, glob, cmd,
+        return ftrace_trace_probe_callback(tr, ops, hash, glob, cmd,
                                            param, enable);
 }
 
 static int
-ftrace_dump_callback(struct ftrace_hash *hash,
+ftrace_dump_callback(struct trace_array *tr, struct ftrace_hash *hash,
                      char *glob, char *cmd, char *param, int enable)
 {
         struct ftrace_probe_ops *ops;
@@ -601,12 +685,12 @@ ftrace_dump_callback(struct ftrace_hash *hash,
         ops = &dump_probe_ops;
 
         /* Only dump once. */
-        return ftrace_trace_probe_callback(ops, hash, glob, cmd,
+        return ftrace_trace_probe_callback(tr, ops, hash, glob, cmd,
                                            "1", enable);
 }
 
 static int
-ftrace_cpudump_callback(struct ftrace_hash *hash,
+ftrace_cpudump_callback(struct trace_array *tr, struct ftrace_hash *hash,
                         char *glob, char *cmd, char *param, int enable)
 {
         struct ftrace_probe_ops *ops;
@@ -614,7 +698,7 @@ ftrace_cpudump_callback(struct ftrace_hash *hash,
         ops = &cpudump_probe_ops;
 
         /* Only dump once. */
-        return ftrace_trace_probe_callback(ops, hash, glob, cmd,
+        return ftrace_trace_probe_callback(tr, ops, hash, glob, cmd,
                                            "1", enable);
 }
 
@@ -687,9 +771,8 @@ static inline int init_func_cmd_traceon(void)
 }
 #endif /* CONFIG_DYNAMIC_FTRACE */
 
-static __init int init_function_trace(void)
+__init int init_function_trace(void)
 {
         init_func_cmd_traceon();
         return register_tracer(&function_trace);
 }
-core_initcall(init_function_trace);
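For readers following the interface change, a minimal stand-alone sketch of a probe written against the reworked ftrace_probe_ops callbacks may help. This is not code from the patch: the my_probe_* names are hypothetical, and only the callback signatures and the ftrace_func_mapper helpers (allocate_ftrace_func_mapper, ftrace_func_mapper_add_ip/find_ip/remove_ip, free_ftrace_func_mapper) are taken from the hunks above. It assumes the file lives in kernel/trace/ so that the local trace.h declarations used by trace_functions.c are visible.

/*
 * Hypothetical example, not part of the patch: a counting probe built on
 * the updated ftrace_probe_ops interface shown in the diff above.
 */
#include "trace.h"

static void
my_probe_func(unsigned long ip, unsigned long parent_ip,
              struct trace_array *tr, struct ftrace_probe_ops *ops,
              void *data)
{
        struct ftrace_func_mapper *mapper = data;
        long *count;

        /* Per-ip state is looked up through the mapper set up in ->init */
        count = (long *)ftrace_func_mapper_find_ip(mapper, ip);
        if (count && *count > 0)
                (*count)--;
}

static int
my_probe_init(struct ftrace_probe_ops *ops, struct trace_array *tr,
              unsigned long ip, void *init_data, void **data)
{
        struct ftrace_func_mapper *mapper = *data;

        /* Allocate the mapper once, then record init_data for this ip */
        if (!mapper) {
                mapper = allocate_ftrace_func_mapper();
                if (!mapper)
                        return -ENOMEM;
                *data = mapper;
        }

        return ftrace_func_mapper_add_ip(mapper, ip, init_data);
}

static void
my_probe_free(struct ftrace_probe_ops *ops, struct trace_array *tr,
              unsigned long ip, void *data)
{
        struct ftrace_func_mapper *mapper = data;

        /* ip == 0: the whole probe is going away, as in ftrace_count_free() */
        if (!ip)
                free_ftrace_func_mapper(mapper, NULL);
        else
                ftrace_func_mapper_remove_ip(mapper, ip);
}

static struct ftrace_probe_ops my_probe_ops = {
        .func = my_probe_func,
        .init = my_probe_init,
        .free = my_probe_free,
};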