path: root/kernel/trace/trace_selftest.c
author     Ingo Molnar <mingo@elte.hu>  2009-04-07 07:34:26 -0400
committer  Ingo Molnar <mingo@elte.hu>  2009-04-07 07:34:42 -0400
commit     2e8844e13ab73f1107aea4317a53ff5879f2e1d7 (patch)
tree       36165371cf6fd26d674610f1c6bb5fac50e6e13f /kernel/trace/trace_selftest.c
parent     c78a3956b982418186e40978a51636a2b43221bc (diff)
parent     d508afb437daee7cf07da085b635c44a4ebf9b38 (diff)
Merge branch 'linus' into tracing/hw-branch-tracing
Merge reason: update to latest tracing and ptrace APIs

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel/trace/trace_selftest.c')
-rw-r--r--  kernel/trace/trace_selftest.c  80
1 file changed, 69 insertions(+), 11 deletions(-)
diff --git a/kernel/trace/trace_selftest.c b/kernel/trace/trace_selftest.c
index b91091267067..499d01c44cd1 100644
--- a/kernel/trace/trace_selftest.c
+++ b/kernel/trace/trace_selftest.c
@@ -250,6 +250,28 @@ trace_selftest_startup_function(struct tracer *trace, struct trace_array *tr)
 
 
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
+
+/* Maximum number of functions to trace before diagnosing a hang */
+#define GRAPH_MAX_FUNC_TEST     100000000
+
+static void __ftrace_dump(bool disable_tracing);
+static unsigned int graph_hang_thresh;
+
+/* Wrap the real function entry probe to avoid possible hanging */
+static int trace_graph_entry_watchdog(struct ftrace_graph_ent *trace)
+{
+        /* This is harmlessly racy, we want to approximately detect a hang */
+        if (unlikely(++graph_hang_thresh > GRAPH_MAX_FUNC_TEST)) {
+                ftrace_graph_stop();
+                printk(KERN_WARNING "BUG: Function graph tracer hang!\n");
+                if (ftrace_dump_on_oops)
+                        __ftrace_dump(false);
+                return 0;
+        }
+
+        return trace_graph_entry(trace);
+}
+
 /*
  * Pretty much the same than for the function tracer from which the selftest
  * has been borrowed.
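The hunk above wraps the stock trace_graph_entry() probe in a watchdog: a deliberately racy counter is bumped on every traced function entry, and once it passes GRAPH_MAX_FUNC_TEST the graph tracer is stopped and the buffers are optionally dumped, so a broken tracer cannot hang the boot-time selftest forever. The standalone sketch below illustrates the same counter-watchdog idea outside the kernel; every name in it (real_probe, probe_watchdog, MAX_CALLS_BEFORE_HANG) is a hypothetical stand-in for the kernel symbols used above, not kernel code.

/* Standalone sketch, hypothetical names: a counter-based watchdog around a probe. */
#include <stdbool.h>
#include <stdio.h>

#define MAX_CALLS_BEFORE_HANG 100000000u

static unsigned int hang_thresh;   /* racy on purpose, like graph_hang_thresh */
static bool stopped;

static int real_probe(int event)
{
        return event & 1;          /* stands in for trace_graph_entry() */
}

static int probe_watchdog(int event)
{
        /* Approximate hang detection: too many calls means the test never ends. */
        if (++hang_thresh > MAX_CALLS_BEFORE_HANG) {
                stopped = true;    /* analogous to ftrace_graph_stop() */
                fprintf(stderr, "BUG: probe appears to hang!\n");
                return 0;          /* refuse to trace this entry */
        }
        return real_probe(event);
}

int main(void)
{
        for (int i = 0; i < 16 && !stopped; i++)
                probe_watchdog(i);
        return 0;
}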
@@ -261,15 +283,29 @@ trace_selftest_startup_function_graph(struct tracer *trace,
         int ret;
         unsigned long count;
 
-        ret = tracer_init(trace, tr);
+        /*
+         * Simulate the init() callback but we attach a watchdog callback
+         * to detect and recover from possible hangs
+         */
+        tracing_reset_online_cpus(tr);
+        ret = register_ftrace_graph(&trace_graph_return,
+                                    &trace_graph_entry_watchdog);
         if (ret) {
                 warn_failed_init_tracer(trace, ret);
                 goto out;
         }
+        tracing_start_cmdline_record();
 
         /* Sleep for a 1/10 of a second */
         msleep(100);
 
+        /* Have we just recovered from a hang? */
+        if (graph_hang_thresh > GRAPH_MAX_FUNC_TEST) {
+                tracing_selftest_disabled = true;
+                ret = -1;
+                goto out;
+        }
+
         tracing_stop();
 
         /* check the trace buffer */
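With the watchdog in place, the selftest no longer calls tracer_init(); it open-codes the graph tracer's init so it can register trace_graph_entry_watchdog instead of the stock entry probe, and after the 100 ms tracing window it checks whether the counter blew past the threshold before trusting the buffers. A standalone sketch of that recovery check follows; the names (run_graph_selftest_window, selftest_disabled) are hypothetical stand-ins, not kernel APIs.

/* Standalone sketch, hypothetical names: bounded window plus post-hoc hang check. */
#include <stdio.h>
#include <unistd.h>

#define MAX_CALLS_BEFORE_HANG 100000000u

static unsigned int hang_thresh;   /* bumped by the watchdog wrapper above */
static int selftest_disabled;

static int run_graph_selftest_window(void)
{
        usleep(100 * 1000);        /* the 100 ms tracing window (msleep(100)) */

        /* Have we just recovered from a hang? */
        if (hang_thresh > MAX_CALLS_BEFORE_HANG) {
                selftest_disabled = 1;   /* skip any further selftests */
                return -1;               /* report the hang as a failure */
        }
        return 0;                        /* buffers can be checked normally */
}

int main(void)
{
        printf("graph selftest %s\n",
               run_graph_selftest_window() ? "FAILED" : "passed");
        return selftest_disabled;
}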
@@ -317,6 +353,14 @@ trace_selftest_startup_irqsoff(struct tracer *trace, struct trace_array *tr)
         local_irq_disable();
         udelay(100);
         local_irq_enable();
+
+        /*
+         * Stop the tracer to avoid a warning subsequent
+         * to buffer flipping failure because tracing_stop()
+         * disables the tr and max buffers, making flipping impossible
+         * in case of parallels max irqs off latencies.
+         */
+        trace->stop(tr);
         /* stop the tracing. */
         tracing_stop();
         /* check both trace buffers */
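The irqsoff change addresses an ordering problem: tracing_stop() disables both the main and the max ring buffers, so if an irqs-off latency completes on another CPU just afterwards, the still-running tracer tries to swap ("flip") the two buffers and triggers a warning. Calling trace->stop(tr) first quiesces the tracer before the buffers go away. The sketch below models that ordering with hypothetical names (tracer_running, buffers_enabled, record_max_latency); it illustrates the race, it is not kernel code.

/* Standalone sketch, hypothetical names: stop the producer before its buffers. */
#include <stdbool.h>
#include <stdio.h>

static bool buffers_enabled = true;   /* roughly, tr and max_tr */
static bool tracer_running  = true;

/* A late max-latency event; in the kernel this would try to swap tr and max_tr. */
static void record_max_latency(void)
{
        if (!tracer_running)
                return;               /* a stopped tracer never reaches the swap */
        if (!buffers_enabled)
                fprintf(stderr, "WARNING: buffer swap attempted while disabled\n");
}

int main(void)
{
        /* Ordering from the hunk above: quiesce the tracer, then the buffers. */
        tracer_running  = false;      /* trace->stop(tr) */
        buffers_enabled = false;      /* tracing_stop() */

        record_max_latency();         /* a racing CPU firing late: now harmless */
        return 0;
}

The same trace->stop()/tracing_stop() pairing is applied to the preemptoff and preemptirqsoff selftests in the hunks below, together with a matching trace->start(tr) when the combined test re-arms for its second pass.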
@@ -371,6 +415,14 @@ trace_selftest_startup_preemptoff(struct tracer *trace, struct trace_array *tr)
         preempt_disable();
         udelay(100);
         preempt_enable();
+
+        /*
+         * Stop the tracer to avoid a warning subsequent
+         * to buffer flipping failure because tracing_stop()
+         * disables the tr and max buffers, making flipping impossible
+         * in case of parallels max preempt off latencies.
+         */
+        trace->stop(tr);
         /* stop the tracing. */
         tracing_stop();
         /* check both trace buffers */
@@ -416,7 +468,7 @@ trace_selftest_startup_preemptirqsoff(struct tracer *trace, struct trace_array *
         ret = tracer_init(trace, tr);
         if (ret) {
                 warn_failed_init_tracer(trace, ret);
-                goto out;
+                goto out_no_start;
         }
 
         /* reset the max latency */
@@ -430,31 +482,35 @@ trace_selftest_startup_preemptirqsoff(struct tracer *trace, struct trace_array *
         /* reverse the order of preempt vs irqs */
         local_irq_enable();
 
+        /*
+         * Stop the tracer to avoid a warning subsequent
+         * to buffer flipping failure because tracing_stop()
+         * disables the tr and max buffers, making flipping impossible
+         * in case of parallels max irqs/preempt off latencies.
+         */
+        trace->stop(tr);
         /* stop the tracing. */
         tracing_stop();
         /* check both trace buffers */
         ret = trace_test_buffer(tr, NULL);
-        if (ret) {
-                tracing_start();
+        if (ret)
                 goto out;
-        }
 
         ret = trace_test_buffer(&max_tr, &count);
-        if (ret) {
-                tracing_start();
+        if (ret)
                 goto out;
-        }
 
         if (!ret && !count) {
                 printk(KERN_CONT ".. no entries found ..");
                 ret = -1;
-                tracing_start();
                 goto out;
         }
 
         /* do the test by disabling interrupts first this time */
         tracing_max_latency = 0;
         tracing_start();
+        trace->start(tr);
+
         preempt_disable();
         local_irq_disable();
         udelay(100);
@@ -462,6 +518,7 @@ trace_selftest_startup_preemptirqsoff(struct tracer *trace, struct trace_array *
         /* reverse the order of preempt vs irqs */
         local_irq_enable();
 
+        trace->stop(tr);
         /* stop the tracing. */
         tracing_stop();
         /* check both trace buffers */
@@ -477,9 +534,10 @@ trace_selftest_startup_preemptirqsoff(struct tracer *trace, struct trace_array *
                 goto out;
         }
 
 out:
-        trace->reset(tr);
         tracing_start();
+out_no_start:
+        trace->reset(tr);
         tracing_max_latency = save_max;
 
         return ret;
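The final hunks restructure the error paths of the preemptirqsoff selftest: instead of duplicating tracing_start() before every failing goto, the common out: label restarts tracing and falls through to out_no_start:, which only resets the tracer; the init-failure path jumps straight to out_no_start: because tracing was never stopped there. A standalone sketch of this layered-label cleanup pattern follows, with hypothetical names throughout.

/* Standalone sketch, hypothetical names: layered error labels, cleanup written once. */
#include <stdio.h>

static void start_tracing_sketch(void) { puts("tracing restarted"); }
static void reset_tracer_sketch(void)  { puts("tracer reset"); }

static int selftest_sketch(int init_fails, int buffer_check_fails)
{
        int ret = 0;

        if (init_fails) {
                ret = -1;
                goto out_no_start;        /* tracing was never stopped here */
        }

        /* ... arm the tracer, run the workload, tracing_stop(), check buffers ... */
        if (buffer_check_fails) {
                ret = -1;
                goto out;                 /* tracing was stopped, must restart it */
        }

out:
        start_tracing_sketch();           /* only paths that stopped tracing get here */
out_no_start:
        reset_tracer_sketch();            /* every path resets the tracer */
        return ret;
}

int main(void)
{
        selftest_sketch(0, 1);            /* buffer check failure: restart + reset */
        selftest_sketch(1, 0);            /* init failure: reset only */
        return 0;
}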