Diffstat (limited to 'kernel/trace/trace_selftest.c')
 -rw-r--r--  kernel/trace/trace_selftest.c | 188
 1 file changed, 161 insertions(+), 27 deletions(-)

diff --git a/kernel/trace/trace_selftest.c b/kernel/trace/trace_selftest.c
index 88c8eb70f54a..08f4eb2763d1 100644
--- a/kernel/trace/trace_selftest.c
+++ b/kernel/trace/trace_selftest.c
@@ -1,5 +1,6 @@
 /* Include in trace.c */
 
+#include <linux/stringify.h>
 #include <linux/kthread.h>
 #include <linux/delay.h>
 
@@ -9,11 +10,12 @@ static inline int trace_valid_entry(struct trace_entry *entry)
 	case TRACE_FN:
 	case TRACE_CTX:
 	case TRACE_WAKE:
-	case TRACE_CONT:
 	case TRACE_STACK:
 	case TRACE_PRINT:
 	case TRACE_SPECIAL:
 	case TRACE_BRANCH:
+	case TRACE_GRAPH_ENT:
+	case TRACE_GRAPH_RET:
 		return 1;
 	}
 	return 0;
@@ -23,10 +25,20 @@ static int trace_test_buffer_cpu(struct trace_array *tr, int cpu)
 {
 	struct ring_buffer_event *event;
 	struct trace_entry *entry;
+	unsigned int loops = 0;
 
 	while ((event = ring_buffer_consume(tr->buffer, cpu, NULL))) {
 		entry = ring_buffer_event_data(event);
 
+		/*
+		 * The ring buffer is a size of trace_buf_size, if
+		 * we loop more than the size, there's something wrong
+		 * with the ring buffer.
+		 */
+		if (loops++ > trace_buf_size) {
+			printk(KERN_CONT ".. bad ring buffer ");
+			goto failed;
+		}
 		if (!trace_valid_entry(entry)) {
 			printk(KERN_CONT ".. invalid entry %d ",
 				entry->type);
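The loops guard above turns a potential livelock into a test failure: the buffer holds at most trace_buf_size entries, so needing more iterations than that means the buffer or its producer is misbehaving. A minimal standalone sketch of the same pattern, where consume(), validate() and MAX_ENTRIES are hypothetical stand-ins for ring_buffer_consume(), trace_valid_entry() and trace_buf_size, not the kernel's API:

	/*
	 * Bounded-consume sketch; all names below are assumptions,
	 * not the kernel interfaces they mimic.
	 */
	struct entry { int type; };

	extern struct entry *consume(void);          /* assumed: NULL once empty */
	extern int validate(const struct entry *e);  /* assumed: nonzero if sane */

	#define MAX_ENTRIES 4096U /* stand-in for trace_buf_size */

	static int drain_and_check(void)
	{
		struct entry *e;
		unsigned int loops = 0;

		while ((e = consume())) {
			/* more iterations than the buffer holds: buffer is broken */
			if (loops++ > MAX_ENTRIES)
				return -1;
			if (!validate(e))
				return -1;
		}
		return 0;
	}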
@@ -57,11 +69,20 @@ static int trace_test_buffer(struct trace_array *tr, unsigned long *count)
 
 	cnt = ring_buffer_entries(tr->buffer);
 
+	/*
+	 * The trace_test_buffer_cpu runs a while loop to consume all data.
+	 * If the calling tracer is broken, and is constantly filling
+	 * the buffer, this will run forever, and hard lock the box.
+	 * We disable the ring buffer while we do this test to prevent
+	 * a hard lock up.
+	 */
+	tracing_off();
 	for_each_possible_cpu(cpu) {
 		ret = trace_test_buffer_cpu(tr, cpu);
 		if (ret)
			break;
 	}
+	tracing_on();
 	__raw_spin_unlock(&ftrace_max_lock);
 	local_irq_restore(flags);
 
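tracing_off() quiesces the ring buffer writers before the per-CPU consume loops run, so a broken tracer that keeps filling the buffer cannot keep trace_test_buffer_cpu() spinning; tracing_on() re-enables writes once validation is done. The same disable-validate-reenable shape, sketched with illustrative names (buffer_off(), buffer_on(), check_all_cpus() are assumptions):

	#include <stdatomic.h>
	#include <stdbool.h>

	static atomic_bool buffer_enabled = true;

	static void buffer_off(void) { atomic_store(&buffer_enabled, false); }
	static void buffer_on(void)  { atomic_store(&buffer_enabled, true); }

	extern int check_all_cpus(void); /* assumed: drains and validates every CPU buffer */

	static int safe_check(void)
	{
		int ret;

		buffer_off();           /* writers now drop their events ...            */
		ret = check_all_cpus(); /* ... so the consume loop is guaranteed to end */
		buffer_on();
		return ret;
	}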
@@ -80,9 +101,6 @@ static inline void warn_failed_init_tracer(struct tracer *trace, int init_ret)
 
 #ifdef CONFIG_DYNAMIC_FTRACE
 
-#define __STR(x) #x
-#define STR(x) __STR(x)
-
 /* Test dynamic code modification and ftrace filters */
 int trace_selftest_startup_dynamic_tracing(struct tracer *trace,
					   struct trace_array *tr,
@@ -106,17 +124,17 @@ int trace_selftest_startup_dynamic_tracing(struct tracer *trace,
 	func();
 
 	/*
-	 * Some archs *cough*PowerPC*cough* add charachters to the
+	 * Some archs *cough*PowerPC*cough* add characters to the
 	 * start of the function names. We simply put a '*' to
-	 * accomodate them.
+	 * accommodate them.
 	 */
-	func_name = "*" STR(DYN_FTRACE_TEST_NAME);
+	func_name = "*" __stringify(DYN_FTRACE_TEST_NAME);
 
 	/* filter only on our function */
 	ftrace_set_filter(func_name, strlen(func_name), 1);
 
 	/* enable tracing */
-	ret = trace->init(tr);
+	ret = tracer_init(trace, tr);
 	if (ret) {
 		warn_failed_init_tracer(trace, ret);
 		goto out;
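__stringify() comes from <linux/stringify.h> (the include added at the top of this diff) and replaces the file-local __STR()/STR() pair with the kernel-wide equivalent. The double expansion is the point: stringizing directly freezes the macro's name, while the extra level expands the argument first. A self-contained illustration; the DYN_FTRACE_TEST_NAME expansion shown assumes the trace.h definition of this era:

	#define __stringify_1(x...) #x
	#define __stringify(x...)   __stringify_1(x)

	/* assumed to match kernel/trace/trace.h at the time */
	#define DYN_FTRACE_TEST_NAME trace_selftest_dynamic_test_func

	static const char raw[]      = __stringify_1(DYN_FTRACE_TEST_NAME);
	/* -> "DYN_FTRACE_TEST_NAME": the token itself, unexpanded */
	static const char expanded[] = __stringify(DYN_FTRACE_TEST_NAME);
	/* -> "trace_selftest_dynamic_test_func": expanded, then stringized */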
@@ -190,7 +208,7 @@ trace_selftest_startup_function(struct tracer *trace, struct trace_array *tr)
 	ftrace_enabled = 1;
 	tracer_enabled = 1;
 
-	ret = trace->init(tr);
+	ret = tracer_init(trace, tr);
 	if (ret) {
 		warn_failed_init_tracer(trace, ret);
 		goto out;
@@ -228,6 +246,90 @@ trace_selftest_startup_function(struct tracer *trace, struct trace_array *tr)
 }
 #endif /* CONFIG_FUNCTION_TRACER */
 
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+
+/* Maximum number of functions to trace before diagnosing a hang */
+#define GRAPH_MAX_FUNC_TEST	100000000
+
+static void __ftrace_dump(bool disable_tracing);
+static unsigned int graph_hang_thresh;
+
+/* Wrap the real function entry probe to avoid possible hanging */
+static int trace_graph_entry_watchdog(struct ftrace_graph_ent *trace)
+{
+	/* This is harmlessly racy, we want to approximately detect a hang */
+	if (unlikely(++graph_hang_thresh > GRAPH_MAX_FUNC_TEST)) {
+		ftrace_graph_stop();
+		printk(KERN_WARNING "BUG: Function graph tracer hang!\n");
+		if (ftrace_dump_on_oops)
+			__ftrace_dump(false);
+		return 0;
+	}
+
+	return trace_graph_entry(trace);
+}
+
+/*
+ * Pretty much the same than for the function tracer from which the selftest
+ * has been borrowed.
+ */
+int
+trace_selftest_startup_function_graph(struct tracer *trace,
+					struct trace_array *tr)
+{
+	int ret;
+	unsigned long count;
+
+	/*
+	 * Simulate the init() callback but we attach a watchdog callback
+	 * to detect and recover from possible hangs
+	 */
+	tracing_reset_online_cpus(tr);
+	ret = register_ftrace_graph(&trace_graph_return,
+				    &trace_graph_entry_watchdog);
+	if (ret) {
+		warn_failed_init_tracer(trace, ret);
+		goto out;
+	}
+	tracing_start_cmdline_record();
+
+	/* Sleep for a 1/10 of a second */
+	msleep(100);
+
+	/* Have we just recovered from a hang? */
+	if (graph_hang_thresh > GRAPH_MAX_FUNC_TEST) {
+		tracing_selftest_disabled = true;
+		ret = -1;
+		goto out;
+	}
+
+	tracing_stop();
+
+	/* check the trace buffer */
+	ret = trace_test_buffer(tr, &count);
+
+	trace->reset(tr);
+	tracing_start();
+
+	if (!ret && !count) {
+		printk(KERN_CONT ".. no entries found ..");
+		ret = -1;
+		goto out;
+	}
+
+	/* Don't test dynamic tracing, the function tracer already did */
+
+out:
+	/* Stop it if we failed */
+	if (ret)
+		ftrace_graph_stop();
+
+	return ret;
+}
+#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
+
+
 #ifdef CONFIG_IRQSOFF_TRACER
 int
 trace_selftest_startup_irqsoff(struct tracer *trace, struct trace_array *tr)
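The watchdog wrapper in the hunk above bounds how long the graph selftest can run: the per-call counter is deliberately racy, since an approximate threshold is enough to decide that 100 million traced entries inside the 100 ms test window means a hang. The shape of that pattern in isolation, with real_entry() and stop_tracing() as hypothetical stand-ins for trace_graph_entry() and ftrace_graph_stop():

	#define MAX_CALLS 100000000U

	static unsigned int calls; /* racy on purpose: an approximate bound is enough */

	extern int real_entry(void *ent);
	extern void stop_tracing(void);

	static int entry_watchdog(void *ent)
	{
		if (++calls > MAX_CALLS) {
			stop_tracing(); /* recover instead of hanging the machine */
			return 0;       /* and do not trace this entry */
		}
		return real_entry(ent);
	}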
@@ -237,7 +339,7 @@ trace_selftest_startup_irqsoff(struct tracer *trace, struct trace_array *tr)
 	int ret;
 
 	/* start the tracing */
-	ret = trace->init(tr);
+	ret = tracer_init(trace, tr);
 	if (ret) {
 		warn_failed_init_tracer(trace, ret);
 		return ret;
@@ -249,6 +351,14 @@ trace_selftest_startup_irqsoff(struct tracer *trace, struct trace_array *tr)
 	local_irq_disable();
 	udelay(100);
 	local_irq_enable();
+
+	/*
+	 * Stop the tracer to avoid a warning subsequent
+	 * to buffer flipping failure because tracing_stop()
+	 * disables the tr and max buffers, making flipping impossible
+	 * in case of parallels max irqs off latencies.
+	 */
+	trace->stop(tr);
 	/* stop the tracing. */
 	tracing_stop();
 	/* check both trace buffers */
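The ordering here (and in the matching preemptoff and preemptirqsoff hunks below) is the point: trace->stop() quiesces the latency tracer first, so no max-latency buffer swap can race with tracing_stop() disabling the tr and max buffers. Reduced to a skeleton with illustrative names (tracer_stop() stands in for trace->stop(tr), buffers_stop() for tracing_stop()):

	extern void tracer_stop(void);
	extern void buffers_stop(void);

	static void stop_in_order(void)
	{
		tracer_stop();  /* no new latency records after this point  */
		buffers_stop(); /* now safe to freeze the tr and max buffers */
	}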
@@ -291,7 +401,7 @@ trace_selftest_startup_preemptoff(struct tracer *trace, struct trace_array *tr)
 	}
 
 	/* start the tracing */
-	ret = trace->init(tr);
+	ret = tracer_init(trace, tr);
 	if (ret) {
 		warn_failed_init_tracer(trace, ret);
 		return ret;
@@ -303,6 +413,14 @@ trace_selftest_startup_preemptoff(struct tracer *trace, struct trace_array *tr)
 	preempt_disable();
 	udelay(100);
 	preempt_enable();
+
+	/*
+	 * Stop the tracer to avoid a warning subsequent
+	 * to buffer flipping failure because tracing_stop()
+	 * disables the tr and max buffers, making flipping impossible
+	 * in case of parallels max preempt off latencies.
+	 */
+	trace->stop(tr);
 	/* stop the tracing. */
 	tracing_stop();
 	/* check both trace buffers */
@@ -345,10 +463,10 @@ trace_selftest_startup_preemptirqsoff(struct tracer *trace, struct trace_array *tr)
 	}
 
 	/* start the tracing */
-	ret = trace->init(tr);
+	ret = tracer_init(trace, tr);
 	if (ret) {
 		warn_failed_init_tracer(trace, ret);
-		goto out;
+		goto out_no_start;
 	}
 
 	/* reset the max latency */
@@ -362,31 +480,35 @@ trace_selftest_startup_preemptirqsoff(struct tracer *trace, struct trace_array *tr)
 	/* reverse the order of preempt vs irqs */
 	local_irq_enable();
 
+	/*
+	 * Stop the tracer to avoid a warning subsequent
+	 * to buffer flipping failure because tracing_stop()
+	 * disables the tr and max buffers, making flipping impossible
+	 * in case of parallels max irqs/preempt off latencies.
+	 */
+	trace->stop(tr);
 	/* stop the tracing. */
 	tracing_stop();
 	/* check both trace buffers */
 	ret = trace_test_buffer(tr, NULL);
-	if (ret) {
-		tracing_start();
+	if (ret)
 		goto out;
-	}
 
 	ret = trace_test_buffer(&max_tr, &count);
-	if (ret) {
-		tracing_start();
+	if (ret)
 		goto out;
-	}
 
 	if (!ret && !count) {
 		printk(KERN_CONT ".. no entries found ..");
 		ret = -1;
-		tracing_start();
 		goto out;
 	}
 
 	/* do the test by disabling interrupts first this time */
 	tracing_max_latency = 0;
 	tracing_start();
+	trace->start(tr);
+
 	preempt_disable();
 	local_irq_disable();
 	udelay(100);
@@ -394,6 +516,7 @@ trace_selftest_startup_preemptirqsoff(struct tracer *trace, struct trace_array *tr)
 	/* reverse the order of preempt vs irqs */
 	local_irq_enable();
 
+	trace->stop(tr);
 	/* stop the tracing. */
 	tracing_stop();
 	/* check both trace buffers */
@@ -409,9 +532,10 @@ trace_selftest_startup_preemptirqsoff(struct tracer *trace, struct trace_array *tr)
 		goto out;
 	}
 
 out:
-	trace->reset(tr);
 	tracing_start();
+out_no_start:
+	trace->reset(tr);
 	tracing_max_latency = save_max;
 
 	return ret;
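The label split above fixes the unwind order: a tracer_init() failure jumps to out_no_start and skips tracing_start() (which would otherwise unbalance the start/stop pairing), while every later failure still restarts tracing before resetting the tracer. The idiom in isolation, with setup(), run() and the undo_*() helpers as hypothetical stand-ins:

	extern int setup(void);
	extern int run(void);
	extern void undo_start(void);  /* tracing_start() in the selftest  */
	extern void undo_setup(void);  /* trace->reset(tr) in the selftest */

	static int staged(void)
	{
		int ret;

		ret = setup();
		if (ret)
			goto out_no_start; /* the start step never ran: skip its undo */

		ret = run();
		if (ret)
			goto out; /* later failures unwind through the start undo */
	out:
		undo_start();
	out_no_start:
		undo_setup();
		return ret;
	}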
@@ -477,7 +601,7 @@ trace_selftest_startup_wakeup(struct tracer *trace, struct trace_array *tr)
 	wait_for_completion(&isrt);
 
 	/* start the tracing */
-	ret = trace->init(tr);
+	ret = tracer_init(trace, tr);
 	if (ret) {
 		warn_failed_init_tracer(trace, ret);
 		return ret;
@@ -538,7 +662,7 @@ trace_selftest_startup_sched_switch(struct tracer *trace, struct trace_array *tr)
 	int ret;
 
 	/* start the tracing */
-	ret = trace->init(tr);
+	ret = tracer_init(trace, tr);
 	if (ret) {
 		warn_failed_init_tracer(trace, ret);
 		return ret;
@@ -570,10 +694,10 @@ trace_selftest_startup_sysprof(struct tracer *trace, struct trace_array *tr)
 	int ret;
 
 	/* start the tracing */
-	ret = trace->init(tr);
+	ret = tracer_init(trace, tr);
 	if (ret) {
 		warn_failed_init_tracer(trace, ret);
-		return 0;
+		return ret;
 	}
 
 	/* Sleep for a 1/10 of a second */
@@ -585,6 +709,11 @@ trace_selftest_startup_sysprof(struct tracer *trace, struct trace_array *tr)
 	trace->reset(tr);
 	tracing_start();
 
+	if (!ret && !count) {
+		printk(KERN_CONT ".. no entries found ..");
+		ret = -1;
+	}
+
 	return ret;
 }
 #endif /* CONFIG_SYSPROF_TRACER */
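With this check (duplicated for the branch tracer at the bottom of the diff), a tracer that initializes cleanly but records nothing no longer passes its selftest trivially. The final bookkeeping as a pure function, where ret and count mirror the selftest's locals (ret == 0 meaning the buffer contents were valid):

	static int finish_check(int ret, unsigned long count)
	{
		if (!ret && !count)
			return -1; /* tracer ran cleanly but recorded nothing */
		return ret;
	}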
@@ -597,7 +726,7 @@ trace_selftest_startup_branch(struct tracer *trace, struct trace_array *tr)
 	int ret;
 
 	/* start the tracing */
-	ret = trace->init(tr);
+	ret = tracer_init(trace, tr);
 	if (ret) {
 		warn_failed_init_tracer(trace, ret);
 		return ret;
@@ -612,6 +741,11 @@ trace_selftest_startup_branch(struct tracer *trace, struct trace_array *tr)
 	trace->reset(tr);
 	tracing_start();
 
+	if (!ret && !count) {
+		printk(KERN_CONT ".. no entries found ..");
+		ret = -1;
+	}
+
 	return ret;
 }
 #endif /* CONFIG_BRANCH_TRACER */