author	Steven Rostedt (VMware) <rostedt@goodmis.org>	2017-03-24 17:59:10 -0400
committer	Steven Rostedt (VMware) <rostedt@goodmis.org>	2017-03-24 20:51:46 -0400
commit	9afecfbb95198ec3ea6d52cca4711ea314f29ec6 (patch)
tree	3269004091cde68fa25a1e0a90001cded6364cfb
parent	f631718de3ca24a9ae03595e937fe0b64cfaf456 (diff)
tracing: Postpone tracer start-up tests till the system is more robust
As tracing can now be enabled very early in boot up, even before some
critical system services (like scheduling), do not run the tracer
selftests until after early_initcall() is performed. If a tracer is
registered before such time, it is saved off in a list and the test
is run when the system is able to handle more diverse functions.

Signed-off-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
-rw-r--r--	kernel/trace/trace.c	71
1 file changed, 71 insertions(+), 0 deletions(-)
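In short, the patch turns early selftest requests into a deferred-work
queue that is drained from an early_initcall(). What follows is a
minimal, self-contained userspace sketch of that pattern, offered only
as an illustration: the names (save_selftest, run_tracer_selftest,
init_trace_selftests) mirror the patch, but the list is hand-rolled
rather than <linux/list.h>, and the kernel version additionally
serializes on trace_types_lock and unlinks failing tracers from the
trace_types list.

/*
 * Userspace sketch of the postpone-and-flush pattern in the patch
 * below. Illustration only, not kernel code.
 */
#include <stdio.h>
#include <stdlib.h>

struct tracer {
	const char *name;
	int (*selftest)(void);	/* returns 0 on pass, < 0 on failure */
};

struct pending {
	struct tracer *type;
	struct pending *next;
};

static struct pending *postponed;	/* deferred selftests */
static int selftests_can_run;		/* set once "boot" is far enough */

static int save_selftest(struct tracer *type)
{
	struct pending *p = malloc(sizeof(*p));

	if (!p)
		return -1;		/* stands in for -ENOMEM */
	p->type = type;
	p->next = postponed;		/* LIFO push, like list_add() */
	postponed = p;
	return 0;
}

static int run_tracer_selftest(struct tracer *type)
{
	/* Too early in "boot": queue the test instead of running it. */
	if (!selftests_can_run)
		return save_selftest(type);
	return type->selftest ? type->selftest() : 0;
}

/* Counterpart of init_trace_selftests(): drain the postponed queue. */
static void init_trace_selftests(void)
{
	selftests_can_run = 1;
	while (postponed) {
		struct pending *p = postponed;

		postponed = p->next;
		if (run_tracer_selftest(p->type) < 0)
			fprintf(stderr, "tracer: %s failed selftest\n",
				p->type->name);
		free(p);
	}
}

static int dummy_selftest(void)
{
	return 0;
}

int main(void)
{
	struct tracer early = { "function", dummy_selftest };

	run_tracer_selftest(&early);	/* queued: registered "too early" */
	init_trace_selftests();		/* the postponed test runs here */
	return 0;
}

Note that list_add() prepends, so the patch below drains postponed
tests in reverse registration order; the LIFO push above mirrors that.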
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 6757561d9617..68a6f78f6862 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -1424,6 +1424,28 @@ static int wait_on_pipe(struct trace_iterator *iter, bool full)
 }
 
 #ifdef CONFIG_FTRACE_STARTUP_TEST
+static bool selftests_can_run;
+
+struct trace_selftests {
+	struct list_head list;
+	struct tracer *type;
+};
+
+static LIST_HEAD(postponed_selftests);
+
+static int save_selftest(struct tracer *type)
+{
+	struct trace_selftests *selftest;
+
+	selftest = kmalloc(sizeof(*selftest), GFP_KERNEL);
+	if (!selftest)
+		return -ENOMEM;
+
+	selftest->type = type;
+	list_add(&selftest->list, &postponed_selftests);
+	return 0;
+}
+
 static int run_tracer_selftest(struct tracer *type)
 {
 	struct trace_array *tr = &global_trace;
@@ -1434,6 +1456,14 @@ static int run_tracer_selftest(struct tracer *type)
 		return 0;
 
 	/*
+	 * If a tracer registers early in boot up (before scheduling is
+	 * initialized and such), then do not run its selftests yet.
+	 * Instead, run it a little later in the boot process.
+	 */
+	if (!selftests_can_run)
+		return save_selftest(type);
+
+	/*
 	 * Run a selftest on this tracer.
 	 * Here we reset the trace buffer, and set the current
 	 * tracer to be this tracer. The tracer can then run some
@@ -1482,6 +1512,47 @@ static int run_tracer_selftest(struct tracer *type)
 	printk(KERN_CONT "PASSED\n");
 	return 0;
 }
+
+static __init int init_trace_selftests(void)
+{
+	struct trace_selftests *p, *n;
+	struct tracer *t, **last;
+	int ret;
+
+	selftests_can_run = true;
+
+	mutex_lock(&trace_types_lock);
+
+	if (list_empty(&postponed_selftests))
+		goto out;
+
+	pr_info("Running postponed tracer tests:\n");
+
+	list_for_each_entry_safe(p, n, &postponed_selftests, list) {
+		ret = run_tracer_selftest(p->type);
+		/* If the test fails, then warn and remove from available_tracers */
+		if (ret < 0) {
+			WARN(1, "tracer: %s failed selftest, disabling\n",
+			     p->type->name);
+			last = &trace_types;
+			for (t = trace_types; t; t = t->next) {
+				if (t == p->type) {
+					*last = t->next;
+					break;
+				}
+				last = &t->next;
+			}
+		}
+		list_del(&p->list);
+		kfree(p);
+	}
+
+ out:
+	mutex_unlock(&trace_types_lock);
+
+	return 0;
+}
+early_initcall(init_trace_selftests);
 #else
 static inline int run_tracer_selftest(struct tracer *type)
 {