aboutsummaryrefslogtreecommitdiffstats
path: root/kernel/trace/trace.c
diff options
context:
space:
mode:
Diffstat (limited to 'kernel/trace/trace.c')
-rw-r--r--kernel/trace/trace.c25
1 file changed, 13 insertions, 12 deletions
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 16892121cb7c..24b6238884f0 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -520,7 +520,15 @@ int register_tracer(struct tracer *type)
520 return -1; 520 return -1;
521 } 521 }
522 522
523 /*
524 * When this gets called we hold the BKL which means that
525 * preemption is disabled. Various trace selftests however
526 * need to disable and enable preemption for successful tests.
527 * So we drop the BKL here and grab it after the tests again.
528 */
529 unlock_kernel();
523 mutex_lock(&trace_types_lock); 530 mutex_lock(&trace_types_lock);
531
524 for (t = trace_types; t; t = t->next) { 532 for (t = trace_types; t; t = t->next) {
525 if (strcmp(type->name, t->name) == 0) { 533 if (strcmp(type->name, t->name) == 0) {
526 /* already found */ 534 /* already found */
@@ -532,13 +540,6 @@ int register_tracer(struct tracer *type)
532 } 540 }
533 541
534#ifdef CONFIG_FTRACE_STARTUP_TEST 542#ifdef CONFIG_FTRACE_STARTUP_TEST
535 /*
536 * When this gets called we hold the BKL which means that preemption
537 * is disabled. Various trace selftests however need to disable
538 * and enable preemption for successful tests. So we drop the BKL here
539 * and grab it after the tests again.
540 */
541 unlock_kernel();
542 if (type->selftest) { 543 if (type->selftest) {
543 struct tracer *saved_tracer = current_trace; 544 struct tracer *saved_tracer = current_trace;
544 struct trace_array *tr = &global_trace; 545 struct trace_array *tr = &global_trace;
@@ -550,9 +551,9 @@ int register_tracer(struct tracer *type)
550 * internal tracing to verify that everything is in order. 551 * internal tracing to verify that everything is in order.
551 * If we fail, we do not register this tracer. 552 * If we fail, we do not register this tracer.
552 */ 553 */
553 for_each_tracing_cpu(i) { 554 for_each_tracing_cpu(i)
554 tracing_reset(tr, i); 555 tracing_reset(tr, i);
555 } 556
556 current_trace = type; 557 current_trace = type;
557 /* the test is responsible for initializing and enabling */ 558 /* the test is responsible for initializing and enabling */
558 pr_info("Testing tracer %s: ", type->name); 559 pr_info("Testing tracer %s: ", type->name);
@@ -564,12 +565,11 @@ int register_tracer(struct tracer *type)
564 goto out; 565 goto out;
565 } 566 }
566 /* Only reset on passing, to avoid touching corrupted buffers */ 567 /* Only reset on passing, to avoid touching corrupted buffers */
567 for_each_tracing_cpu(i) { 568 for_each_tracing_cpu(i)
568 tracing_reset(tr, i); 569 tracing_reset(tr, i);
569 } 570
570 printk(KERN_CONT "PASSED\n"); 571 printk(KERN_CONT "PASSED\n");
571 } 572 }
572 lock_kernel();
573#endif 573#endif
574 574
575 type->next = trace_types; 575 type->next = trace_types;
@@ -580,6 +580,7 @@ int register_tracer(struct tracer *type)
580 580
581 out: 581 out:
582 mutex_unlock(&trace_types_lock); 582 mutex_unlock(&trace_types_lock);
583 lock_kernel();
583 584
584 return ret; 585 return ret;
585} 586}