Diffstat (limited to 'kernel/trace/trace_selftest.c')
 -rw-r--r--  kernel/trace/trace_selftest.c | 169
 1 file changed, 142 insertions(+), 27 deletions(-)
diff --git a/kernel/trace/trace_selftest.c b/kernel/trace/trace_selftest.c
index bc8e80a86bca..08f4eb2763d1 100644
--- a/kernel/trace/trace_selftest.c
+++ b/kernel/trace/trace_selftest.c
@@ -1,5 +1,6 @@
 /* Include in trace.c */
 
+#include <linux/stringify.h>
 #include <linux/kthread.h>
 #include <linux/delay.h>
 
@@ -9,11 +10,12 @@ static inline int trace_valid_entry(struct trace_entry *entry)
	case TRACE_FN:
	case TRACE_CTX:
	case TRACE_WAKE:
-	case TRACE_CONT:
	case TRACE_STACK:
	case TRACE_PRINT:
	case TRACE_SPECIAL:
	case TRACE_BRANCH:
+	case TRACE_GRAPH_ENT:
+	case TRACE_GRAPH_RET:
		return 1;
	}
	return 0;
@@ -99,9 +101,6 @@ static inline void warn_failed_init_tracer(struct tracer *trace, int init_ret)
 
 #ifdef CONFIG_DYNAMIC_FTRACE
 
-#define __STR(x) #x
-#define STR(x) __STR(x)
-
 /* Test dynamic code modification and ftrace filters */
 int trace_selftest_startup_dynamic_tracing(struct tracer *trace,
					   struct trace_array *tr,
@@ -125,17 +124,17 @@ int trace_selftest_startup_dynamic_tracing(struct tracer *trace,
	func();
 
	/*
-	 * Some archs *cough*PowerPC*cough* add charachters to the
+	 * Some archs *cough*PowerPC*cough* add characters to the
	 * start of the function names. We simply put a '*' to
-	 * accomodate them.
+	 * accommodate them.
	 */
-	func_name = "*" STR(DYN_FTRACE_TEST_NAME);
+	func_name = "*" __stringify(DYN_FTRACE_TEST_NAME);
 
	/* filter only on our function */
	ftrace_set_filter(func_name, strlen(func_name), 1);
 
	/* enable tracing */
-	ret = trace->init(tr);
+	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		goto out;
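
The STR()/__STR() pair deleted in the earlier hunk and the __stringify() call that replaces it here do the same two-step dance: stringification needs a second macro level so the argument is macro-expanded before it is quoted. A stand-alone sketch of the mechanism, compilable as plain user-space C (the DYN_FTRACE_TEST_NAME expansion shown is the one trace.h defines; treat it here as an assumption):

    #include <stdio.h>

    /*
     * Two-level stringification, mirroring <linux/stringify.h>:
     * the outer macro expands its argument first, the inner one
     * then turns the expanded result into a string literal.
     */
    #define __stringify_1(x...)     #x
    #define __stringify(x...)       __stringify_1(x)

    /* Assumed expansion, as defined in kernel/trace/trace.h */
    #define DYN_FTRACE_TEST_NAME    trace_selftest_dynamic_test_func

    int main(void)
    {
        /* A single level (#x) would print "*DYN_FTRACE_TEST_NAME" */
        printf("*" __stringify(DYN_FTRACE_TEST_NAME) "\n");
        /* prints: *trace_selftest_dynamic_test_func */
        return 0;
    }
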
@@ -209,7 +208,7 @@ trace_selftest_startup_function(struct tracer *trace, struct trace_array *tr)
	ftrace_enabled = 1;
	tracer_enabled = 1;
 
-	ret = trace->init(tr);
+	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		goto out;
@@ -247,6 +246,90 @@ trace_selftest_startup_function(struct tracer *trace, struct trace_array *tr)
 }
 #endif /* CONFIG_FUNCTION_TRACER */
 
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+
+/* Maximum number of functions to trace before diagnosing a hang */
+#define GRAPH_MAX_FUNC_TEST	100000000
+
+static void __ftrace_dump(bool disable_tracing);
+static unsigned int graph_hang_thresh;
+
+/* Wrap the real function entry probe to avoid possible hanging */
+static int trace_graph_entry_watchdog(struct ftrace_graph_ent *trace)
+{
+	/* This is harmlessly racy, we want to approximately detect a hang */
+	if (unlikely(++graph_hang_thresh > GRAPH_MAX_FUNC_TEST)) {
+		ftrace_graph_stop();
+		printk(KERN_WARNING "BUG: Function graph tracer hang!\n");
+		if (ftrace_dump_on_oops)
+			__ftrace_dump(false);
+		return 0;
+	}
+
+	return trace_graph_entry(trace);
+}
+
+/*
+ * Pretty much the same as for the function tracer, from which the
+ * selftest has been borrowed.
+ */
+int
+trace_selftest_startup_function_graph(struct tracer *trace,
+					struct trace_array *tr)
+{
+	int ret;
+	unsigned long count;
+
+	/*
+	 * Simulate the init() callback but we attach a watchdog callback
+	 * to detect and recover from possible hangs
+	 */
+	tracing_reset_online_cpus(tr);
+	ret = register_ftrace_graph(&trace_graph_return,
+				    &trace_graph_entry_watchdog);
+	if (ret) {
+		warn_failed_init_tracer(trace, ret);
+		goto out;
+	}
+	tracing_start_cmdline_record();
+
+	/* Sleep for a 1/10 of a second */
+	msleep(100);
+
+	/* Have we just recovered from a hang? */
+	if (graph_hang_thresh > GRAPH_MAX_FUNC_TEST) {
+		tracing_selftest_disabled = true;
+		ret = -1;
+		goto out;
+	}
+
+	tracing_stop();
+
+	/* check the trace buffer */
+	ret = trace_test_buffer(tr, &count);
+
+	trace->reset(tr);
+	tracing_start();
+
+	if (!ret && !count) {
+		printk(KERN_CONT ".. no entries found ..");
+		ret = -1;
+		goto out;
+	}
+
+	/* Don't test dynamic tracing, the function tracer already did */
+
+out:
+	/* Stop it if we failed */
+	if (ret)
+		ftrace_graph_stop();
+
+	return ret;
+}
+#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
+
+
 #ifdef CONFIG_IRQSOFF_TRACER
 int
 trace_selftest_startup_irqsoff(struct tracer *trace, struct trace_array *tr)
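
The hunk above never lets the selftest register trace_graph_entry() directly: register_ftrace_graph() is handed trace_graph_entry_watchdog(), which counts every invocation and, past GRAPH_MAX_FUNC_TEST, stops the graph tracer instead of calling through. The same wrap-and-count idea in a self-contained user-space sketch (every name below is an illustrative stand-in, not kernel API):

    #include <stdbool.h>
    #include <stdio.h>

    /* Illustrative stand-in for struct ftrace_graph_ent */
    struct graph_ent { unsigned long func; };

    #define MAX_CALLS_BEFORE_HANG 100000000u

    static unsigned int hang_thresh; /* racy on purpose: a rough count */
    static bool stopped;

    /* The probe we actually want to run for each traced function */
    static int real_probe(struct graph_ent *ent)
    {
        (void)ent; /* ... record the function entry ... */
        return 1;
    }

    /* Wrapper installed in place of the real probe during the test */
    static int probe_watchdog(struct graph_ent *ent)
    {
        if (++hang_thresh > MAX_CALLS_BEFORE_HANG) {
            stopped = true; /* analogous to ftrace_graph_stop() */
            fprintf(stderr, "BUG: probe appears to hang!\n");
            return 0;       /* refuse to trace this entry */
        }
        return real_probe(ent);
    }

    int main(void)
    {
        struct graph_ent e = { .func = 0 };

        for (int i = 0; i < 5; i++) /* each traced call hits the wrapper */
            probe_watchdog(&e);
        return stopped ? 1 : 0;
    }

After the msleep(100) window the selftest re-reads graph_hang_thresh: a value beyond the limit means the watchdog fired and the test reports failure.
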
@@ -256,7 +339,7 @@ trace_selftest_startup_irqsoff(struct tracer *trace, struct trace_array *tr)
	int ret;
 
	/* start the tracing */
-	ret = trace->init(tr);
+	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
@@ -268,6 +351,14 @@ trace_selftest_startup_irqsoff(struct tracer *trace, struct trace_array *tr)
	local_irq_disable();
	udelay(100);
	local_irq_enable();
+
+	/*
+	 * Stop the tracer to avoid a warning subsequent
+	 * to buffer flipping failure because tracing_stop()
+	 * disables the tr and max buffers, making flipping impossible
+	 * in case of parallel max irqs off latencies.
+	 */
+	trace->stop(tr);
	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
@@ -310,7 +401,7 @@ trace_selftest_startup_preemptoff(struct tracer *trace, struct trace_array *tr)
	}
 
	/* start the tracing */
-	ret = trace->init(tr);
+	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
@@ -322,6 +413,14 @@ trace_selftest_startup_preemptoff(struct tracer *trace, struct trace_array *tr)
	preempt_disable();
	udelay(100);
	preempt_enable();
+
+	/*
+	 * Stop the tracer to avoid a warning subsequent
+	 * to buffer flipping failure because tracing_stop()
+	 * disables the tr and max buffers, making flipping impossible
+	 * in case of parallel max preempt off latencies.
+	 */
+	trace->stop(tr);
	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
@@ -364,10 +463,10 @@ trace_selftest_startup_preemptirqsoff(struct tracer *trace, struct trace_array *
	}
 
	/* start the tracing */
-	ret = trace->init(tr);
+	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
-		goto out;
+		goto out_no_start;
	}
 
	/* reset the max latency */
@@ -381,31 +480,35 @@ trace_selftest_startup_preemptirqsoff(struct tracer *trace, struct trace_array *
	/* reverse the order of preempt vs irqs */
	local_irq_enable();
 
+	/*
+	 * Stop the tracer to avoid a warning subsequent
+	 * to buffer flipping failure because tracing_stop()
+	 * disables the tr and max buffers, making flipping impossible
+	 * in case of parallel max irqs/preempt off latencies.
+	 */
+	trace->stop(tr);
	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
-	if (ret) {
-		tracing_start();
+	if (ret)
		goto out;
-	}
 
	ret = trace_test_buffer(&max_tr, &count);
-	if (ret) {
-		tracing_start();
+	if (ret)
		goto out;
-	}
 
	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
-		tracing_start();
		goto out;
	}
 
	/* do the test by disabling interrupts first this time */
	tracing_max_latency = 0;
	tracing_start();
+	trace->start(tr);
+
	preempt_disable();
	local_irq_disable();
	udelay(100);
@@ -413,6 +516,7 @@ trace_selftest_startup_preemptirqsoff(struct tracer *trace, struct trace_array *
	/* reverse the order of preempt vs irqs */
	local_irq_enable();
 
+	trace->stop(tr);
	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
@@ -428,9 +532,10 @@ trace_selftest_startup_preemptirqsoff(struct tracer *trace, struct trace_array *
		goto out;
	}
 
 out:
-	trace->reset(tr);
	tracing_start();
+out_no_start:
+	trace->reset(tr);
	tracing_max_latency = save_max;
 
	return ret;
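
Before this change, every failing check had to remember to call tracing_start() by hand before its goto out; the reshuffled out/out_no_start labels centralize the unwind: any post-stop failure jumps to out, which restarts tracing and falls through into the reset, while a failed init jumps straight to out_no_start. A generic sketch of this ordered-label cleanup idiom (all helper names are hypothetical stand-ins for the tracer hooks):

    /* Hypothetical stand-ins, for illustration only */
    static int  init_tracer(void)       { return 0; }
    static void reset_tracer(void)      { }
    static void start_recording(void)   { }
    static void stop_recording(void)    { }
    static int  check_buffers(void)     { return 0; }
    static int  check_max_buffers(void) { return 0; }

    /*
     * Ordered error labels: a failure jumps to the label that undoes
     * exactly what has already been done. An init_tracer() failure
     * must not call start_recording(), hence the second label.
     */
    static int selftest(void)
    {
        int ret;

        ret = init_tracer();
        if (ret)
            goto out_no_start; /* recording was never stopped */

        stop_recording();

        ret = check_buffers();
        if (ret)
            goto out;

        ret = check_max_buffers();

    out:
        start_recording();  /* undo stop_recording() */
    out_no_start:
        reset_tracer();     /* reset is safe even after a failed init */
        return ret;
    }

    int main(void)
    {
        return selftest();
    }
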
@@ -496,7 +601,7 @@ trace_selftest_startup_wakeup(struct tracer *trace, struct trace_array *tr)
	wait_for_completion(&isrt);
 
	/* start the tracing */
-	ret = trace->init(tr);
+	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
@@ -557,7 +662,7 @@ trace_selftest_startup_sched_switch(struct tracer *trace, struct trace_array *tr
	int ret;
 
	/* start the tracing */
-	ret = trace->init(tr);
+	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
@@ -589,10 +694,10 @@ trace_selftest_startup_sysprof(struct tracer *trace, struct trace_array *tr)
	int ret;
 
	/* start the tracing */
-	ret = trace->init(tr);
+	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
-		return 0;
+		return ret;
	}
 
	/* Sleep for a 1/10 of a second */
@@ -604,6 +709,11 @@ trace_selftest_startup_sysprof(struct tracer *trace, struct trace_array *tr)
	trace->reset(tr);
	tracing_start();
 
+	if (!ret && !count) {
+		printk(KERN_CONT ".. no entries found ..");
+		ret = -1;
+	}
+
	return ret;
 }
 #endif /* CONFIG_SYSPROF_TRACER */
@@ -616,7 +726,7 @@ trace_selftest_startup_branch(struct tracer *trace, struct trace_array *tr)
	int ret;
 
	/* start the tracing */
-	ret = trace->init(tr);
+	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
@@ -631,6 +741,11 @@ trace_selftest_startup_branch(struct tracer *trace, struct trace_array *tr)
	trace->reset(tr);
	tracing_start();
 
+	if (!ret && !count) {
+		printk(KERN_CONT ".. no entries found ..");
+		ret = -1;
+	}
+
	return ret;
 }
 #endif /* CONFIG_BRANCH_TRACER */
