author    Steven Rostedt (VMware) <rostedt@goodmis.org>  2017-12-22 21:19:29 -0500
committer Steven Rostedt (VMware) <rostedt@goodmis.org>  2017-12-27 14:21:09 -0500
commit    ae415fa4c5248a8cf4faabd5a3c20576cb1ad607 (patch)
tree      6b9ee0b8ea7064dadf79fa505afda046c0627f45
parent    6b7e633fe9c24682df550e5311f47fb524701586 (diff)
ring-buffer: Do not reuse reader page if still in use
To free the reader page that is allocated with ring_buffer_alloc_read_page(),
ring_buffer_free_read_page() must be called. For better performance, this page
can be reused by the ring buffer to avoid having to free and allocate new
pages.

The issue arises when the page is handed to a splice pipe into the networking
code. The networking code may take an extra reference on the page and keep it
active while it is queued to be sent out to the network. Incrementing the page
ref does not prevent the page from being reused by the ring buffer, so the
page that is still queued for the network can be overwritten with new data
before it is actually sent.

Add a check on the page ref counter, and only reuse the page if it is not in
use anywhere else.

Cc: stable@vger.kernel.org
Fixes: 73a757e63114d ("ring-buffer: Return reader page back into existing ring buffer")
Signed-off-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
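For context, the sketch below shows how the reader-page API described above is
typically driven from the consumer side, so it is clear when the page returned
to ring_buffer_free_read_page() may still be referenced elsewhere. It is only
illustrative: the helper name example_read_one_page() is hypothetical, error
handling is trimmed to the essentials, and it assumes the 4.15-era signatures
of ring_buffer_alloc_read_page(), ring_buffer_read_page() and
ring_buffer_free_read_page().

#include <linux/ring_buffer.h>
#include <linux/err.h>
#include <linux/mm.h>

/* Hypothetical consumer, for illustration only: pull one page of events
 * off @cpu of @buffer and hand it to a zero-copy (splice) consumer. */
static int example_read_one_page(struct ring_buffer *buffer, int cpu)
{
	void *spare;
	int ret;

	/* Allocate (or get a recycled) reader page; may return ERR_PTR(). */
	spare = ring_buffer_alloc_read_page(buffer, cpu);
	if (IS_ERR(spare))
		return PTR_ERR(spare);

	/* Swap a full page of events out of the ring buffer into 'spare'. */
	ret = ring_buffer_read_page(buffer, &spare, PAGE_SIZE, cpu, 0);
	if (ret >= 0) {
		/*
		 * A splice path would hand the underlying struct page to the
		 * pipe/network here, and that path may take an extra page
		 * reference that outlives this function.
		 */
	}

	/*
	 * Return the page.  Before this fix, ring_buffer_free_read_page()
	 * could recycle it into the ring buffer while that extra reference
	 * was still live; with the fix it only recycles the page when no
	 * one else holds a reference, otherwise it just drops its own
	 * reference via free_page().
	 */
	ring_buffer_free_read_page(buffer, cpu, spare);

	return ret < 0 ? ret : 0;
}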
-rw-r--r--  kernel/trace/ring_buffer.c  6
1 file changed, 6 insertions(+), 0 deletions(-)
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index e06cde093f76..9ab18995ff1e 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -4404,8 +4404,13 @@ void ring_buffer_free_read_page(struct ring_buffer *buffer, int cpu, void *data)
 {
 	struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
 	struct buffer_data_page *bpage = data;
+	struct page *page = virt_to_page(bpage);
 	unsigned long flags;
 
+	/* If the page is still in use someplace else, we can't reuse it */
+	if (page_ref_count(page) > 1)
+		goto out;
+
 	local_irq_save(flags);
 	arch_spin_lock(&cpu_buffer->lock);
 
@@ -4417,6 +4422,7 @@ void ring_buffer_free_read_page(struct ring_buffer *buffer, int cpu, void *data)
 	arch_spin_unlock(&cpu_buffer->lock);
 	local_irq_restore(flags);
 
+ out:
 	free_page((unsigned long)bpage);
 }
 EXPORT_SYMBOL_GPL(ring_buffer_free_read_page);