path: root/kernel/trace
Diffstat (limited to 'kernel/trace')
-rw-r--r--  kernel/trace/ring_buffer.c |  1
-rw-r--r--  kernel/trace/trace.c       | 18
2 files changed, 15 insertions, 4 deletions
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index caa4fda50f8..85ced143c2c 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -622,6 +622,7 @@ int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size)
 		list_del_init(&page->list);
 		free_buffer_page(page);
 	}
+	mutex_unlock(&buffer->mutex);
 	return -ENOMEM;
 }
 
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 2596b5a968c..5653c6b07ba 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -518,7 +518,15 @@ int register_tracer(struct tracer *type)
 		return -1;
 	}
 
+	/*
+	 * When this gets called we hold the BKL which means that
+	 * preemption is disabled. Various trace selftests however
+	 * need to disable and enable preemption for successful tests.
+	 * So we drop the BKL here and grab it after the tests again.
+	 */
+	unlock_kernel();
 	mutex_lock(&trace_types_lock);
+
 	for (t = trace_types; t; t = t->next) {
 		if (strcmp(type->name, t->name) == 0) {
 			/* already found */
@@ -541,9 +549,9 @@ int register_tracer(struct tracer *type)
 		 * internal tracing to verify that everything is in order.
 		 * If we fail, we do not register this tracer.
 		 */
-		for_each_tracing_cpu(i) {
+		for_each_tracing_cpu(i)
 			tracing_reset(tr, i);
-		}
+
 		current_trace = type;
 		/* the test is responsible for initializing and enabling */
 		pr_info("Testing tracer %s: ", type->name);
@@ -555,9 +563,9 @@ int register_tracer(struct tracer *type)
 			goto out;
 		}
 		/* Only reset on passing, to avoid touching corrupted buffers */
-		for_each_tracing_cpu(i) {
+		for_each_tracing_cpu(i)
 			tracing_reset(tr, i);
-		}
+
 		printk(KERN_CONT "PASSED\n");
 	}
 #endif
@@ -570,6 +578,7 @@ int register_tracer(struct tracer *type)
 
  out:
 	mutex_unlock(&trace_types_lock);
+	lock_kernel();
 
 	return ret;
 }
@@ -2178,6 +2187,7 @@ __tracing_open(struct inode *inode, struct file *file, int *ret)
 			ring_buffer_read_finish(iter->buffer_iter[cpu]);
 	}
 	mutex_unlock(&trace_types_lock);
+	kfree(iter);
 
 	return ERR_PTR(-ENOMEM);
 }