author    Steven Rostedt <srostedt@redhat.com>    2008-11-10 21:46:00 -0500
committer Steven Rostedt <srostedt@redhat.com>    2008-11-10 21:47:35 -0500
commit    bf5e6519b85b3853f2d0bb4f17a4e2eaeffeb574 (patch)
tree      d5560a28100aed24e3bb09e68c17a0ea1fbde07b /kernel/trace/trace.c
parent    3ad4f597058301c97f362e500a32f63f5c950a45 (diff)
ftrace: disable tracing on resize
Impact: fix for bug on resize
This patch addresses the bug found here:
http://bugzilla.kernel.org/show_bug.cgi?id=11996
When ftrace was converted to the new unified trace buffer, the resizing of
the buffer was no longer protected as well as it was originally. If tracing
is performed while the resize occurs, the buffer can be corrupted.
This patch disables all ftrace buffer modifications before a resize
takes place.
Signed-off-by: Steven Rostedt <srostedt@redhat.com>
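
The change below brackets the resize with the per-CPU disabled counters: each CPU buffer of both global_trace and max_tr is disabled before ring_buffer_resize() is called and re-enabled at the out: label, so any tracer that fires in between sees a non-zero count and leaves the buffer alone. As a rough user-space illustration of that pattern only (not the kernel code), a minimal sketch with C11 atomics might look like the following; the cpu_buffer struct, trace_write(), resize_all(), NR_CPUS, and the realloc-based "resize" are all invented for the example:

/*
 * Minimal user-space sketch of the disable-around-resize pattern.
 * All names here (cpu_buffer, trace_write, resize_all, NR_CPUS) are
 * invented for illustration; this is not the kernel implementation.
 */
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

#define NR_CPUS 4

struct cpu_buffer {
	atomic_int disabled;	/* writers stay out while this is non-zero */
	size_t entries;
	char *data;
};

static struct cpu_buffer buffers[NR_CPUS];

/* Writer side: only log if nothing else has disabled this buffer. */
static void trace_write(int cpu, const char *msg)
{
	struct cpu_buffer *b = &buffers[cpu];

	/* inc/check/dec guard: a concurrent disable makes the count > 1 */
	if (atomic_fetch_add(&b->disabled, 1) == 0)
		snprintf(b->data, b->entries, "%s", msg);
	atomic_fetch_sub(&b->disabled, 1);
}

/* Resize side: disable every per-CPU buffer first, as the patch does. */
static int resize_all(size_t new_entries)
{
	int cpu, ret = 0;

	for (cpu = 0; cpu < NR_CPUS; cpu++)
		atomic_fetch_add(&buffers[cpu].disabled, 1);

	for (cpu = 0; cpu < NR_CPUS; cpu++) {
		char *p = realloc(buffers[cpu].data, new_entries);
		if (!p) {
			ret = -1;	/* earlier buffers stay resized */
			break;
		}
		buffers[cpu].data = p;
		buffers[cpu].entries = new_entries;
	}

	for (cpu = 0; cpu < NR_CPUS; cpu++)
		atomic_fetch_sub(&buffers[cpu].disabled, 1);

	return ret;
}

int main(void)
{
	int cpu;

	for (cpu = 0; cpu < NR_CPUS; cpu++) {
		buffers[cpu].entries = 64;
		buffers[cpu].data = calloc(1, buffers[cpu].entries);
	}

	trace_write(0, "before resize");
	if (resize_all(128) == 0)
		trace_write(0, "after resize");

	for (cpu = 0; cpu < NR_CPUS; cpu++)
		free(buffers[cpu].data);
	return 0;
}
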
Diffstat (limited to 'kernel/trace/trace.c')
 -rw-r--r--  kernel/trace/trace.c | 17 ++++++++++++++++-
 1 file changed, 16 insertions(+), 1 deletion(-)
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 9f3b478f9171..abfa8103d046 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -2676,7 +2676,7 @@ tracing_entries_write(struct file *filp, const char __user *ubuf,
 {
 	unsigned long val;
 	char buf[64];
-	int ret;
+	int ret, cpu;
 	struct trace_array *tr = filp->private_data;
 
 	if (cnt >= sizeof(buf))
@@ -2704,6 +2704,14 @@ tracing_entries_write(struct file *filp, const char __user *ubuf,
 		goto out;
 	}
 
+	/* disable all cpu buffers */
+	for_each_tracing_cpu(cpu) {
+		if (global_trace.data[cpu])
+			atomic_inc(&global_trace.data[cpu]->disabled);
+		if (max_tr.data[cpu])
+			atomic_inc(&max_tr.data[cpu]->disabled);
+	}
+
 	if (val != global_trace.entries) {
 		ret = ring_buffer_resize(global_trace.buffer, val);
 		if (ret < 0) {
@@ -2735,6 +2743,13 @@ tracing_entries_write(struct file *filp, const char __user *ubuf,
 	if (tracing_disabled)
 		cnt = -ENOMEM;
 out:
+	for_each_tracing_cpu(cpu) {
+		if (global_trace.data[cpu])
+			atomic_dec(&global_trace.data[cpu]->disabled);
+		if (max_tr.data[cpu])
+			atomic_dec(&max_tr.data[cpu]->disabled);
+	}
+
 	max_tr.entries = global_trace.entries;
 	mutex_unlock(&trace_types_lock);
 