author		Lai Jiangshan <laijs@cn.fujitsu.com>	2010-03-08 01:50:43 -0500
committer	Steven Rostedt <rostedt@goodmis.org>	2010-03-12 20:26:56 -0500
commit		52fbe9cde7fdb5c6fac196d7ebd2d92d05ef3cd4 (patch)
tree		77ec9beecf7a58ed06f59c589f122caf87ec4f0b /kernel/trace/ring_buffer.c
parent		915a0b575fdb2376135ed9334b3ccb1eb51db622 (diff)
ring-buffer: Move disabled check into preempt disable section
The ring buffer resizing and resetting rely on a sched RCU action:
the buffers are disabled, a synchronize_sched() is called, and then
the resize or reset takes place.

But this only works if the check of the disabled flag is made within
the preempt-disabled section; otherwise a window exists in which the
buffers can be written to while a reset or resize takes place.
Cc: stable@kernel.org
Reported-by: Li Zefan <lizf@cn.fujitsu.com>
Signed-off-by: Lai Jiangshan <laijs@cn.fujitsu.com>
LKML-Reference: <4B949E43.2010906@cn.fujitsu.com>
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
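
For context, here is a minimal, self-contained sketch of the synchronization
scheme the changelog describes. It is not the actual ring-buffer code: the
names (my_buffer, my_write(), my_reset()) are hypothetical, and it assumes
the 2010-era kernel API (synchronize_sched(), preempt_disable_notrace()).
The point is that the writer may only test record_disabled after preemption
is disabled, so the resetter's synchronize_sched() is guaranteed to wait out
any writer that saw the flag clear.

/*
 * Sketch only: hypothetical names, modeled on the pattern described
 * above (2.6.33-era API; newer kernels spell synchronize_sched() as
 * synchronize_rcu()).
 */
#include <linux/preempt.h>
#include <linux/rcupdate.h>
#include <asm/atomic.h>

struct my_buffer {
	atomic_t	record_disabled;
	/* ... pages, head/tail indices ... */
};

/*
 * Writer: the record_disabled test must sit inside the
 * preempt-disabled region.  If it were done before
 * preempt_disable_notrace(), the resetter could set the flag and
 * return from synchronize_sched() in the window between the test
 * and the preempt_disable, and this writer would then write to a
 * buffer that is being reset.
 */
static int my_write(struct my_buffer *buffer)
{
	int ret = -EBUSY;

	preempt_disable_notrace();

	if (atomic_read(&buffer->record_disabled))
		goto out;

	/* ... reserve space and copy the event ... */
	ret = 0;
 out:
	preempt_enable_notrace();
	return ret;
}

/*
 * Resetter: once synchronize_sched() returns, every CPU has been
 * seen with preemption enabled, so no writer that missed the flag
 * can still be inside its preempt-disabled section.
 */
static void my_reset(struct my_buffer *buffer)
{
	atomic_inc(&buffer->record_disabled);
	synchronize_sched();

	/* ... safe to reset or resize the buffer here ... */

	atomic_dec(&buffer->record_disabled);
}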
Diffstat (limited to 'kernel/trace/ring_buffer.c')
-rw-r--r--	kernel/trace/ring_buffer.c	12
1 file changed, 6 insertions(+), 6 deletions(-)
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index 8c1b2d290718..54191d6ed195 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -2232,12 +2232,12 @@ ring_buffer_lock_reserve(struct ring_buffer *buffer, unsigned long length)
 	if (ring_buffer_flags != RB_BUFFERS_ON)
 		return NULL;
 
-	if (atomic_read(&buffer->record_disabled))
-		return NULL;
-
 	/* If we are tracing schedule, we don't want to recurse */
 	resched = ftrace_preempt_disable();
 
+	if (atomic_read(&buffer->record_disabled))
+		goto out_nocheck;
+
 	if (trace_recursive_lock())
 		goto out_nocheck;
 
@@ -2469,11 +2469,11 @@ int ring_buffer_write(struct ring_buffer *buffer,
 	if (ring_buffer_flags != RB_BUFFERS_ON)
 		return -EBUSY;
 
-	if (atomic_read(&buffer->record_disabled))
-		return -EBUSY;
-
 	resched = ftrace_preempt_disable();
 
+	if (atomic_read(&buffer->record_disabled))
+		goto out;
+
 	cpu = raw_smp_processor_id();
 
 	if (!cpumask_test_cpu(cpu, buffer->cpumask))