aboutsummaryrefslogtreecommitdiffstats
path: root/kernel/trace/ring_buffer.c
diff options
context:
space:
mode:
authorCorey Minyard <cminyard@mvista.com>2014-07-16 15:07:13 -0400
committerSteven Rostedt <rostedt@goodmis.org>2014-07-18 13:58:12 -0400
commit021c5b34452d52e51664f09b98cd50c5495e74b6 (patch)
tree9210b55531cec2cf3ba0bc5059ece034ad0dc0ba /kernel/trace/ring_buffer.c
parent3a636388bae8390d23f31e061c0c6fdc14525786 (diff)
ring-buffer: Always run per-cpu ring buffer resize with schedule_work_on()
The code for resizing the trace ring buffers has to run the per-cpu resize on the CPU itself. The code was using preempt_disable() and running the code for the current CPU directly, otherwise calling schedule_work_on(). At least on RT this could result in the following: |BUG: sleeping function called from invalid context at kernel/rtmutex.c:673 |in_atomic(): 1, irqs_disabled(): 0, pid: 607, name: bash |3 locks held by bash/607: |CPU: 0 PID: 607 Comm: bash Not tainted 3.12.15-rt25+ #124 |(rt_spin_lock+0x28/0x68) |(free_hot_cold_page+0x84/0x3b8) |(free_buffer_page+0x14/0x20) |(rb_update_pages+0x280/0x338) |(ring_buffer_resize+0x32c/0x3dc) |(free_snapshot+0x18/0x38) |(tracing_set_tracer+0x27c/0x2ac) probably via |cd /sys/kernel/debug/tracing/ |echo 1 > events/enable ; sleep 2 |echo 1024 > buffer_size_kb If we just always use schedule_work_on(), there's no need for the preempt_disable(). So do that. Link: http://lkml.kernel.org/p/1405537633-31518-1-git-send-email-cminyard@mvista.com Reported-by: Stanislav Meduna <stano@meduna.org> Signed-off-by: Corey Minyard <cminyard@mvista.com> Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
Diffstat (limited to 'kernel/trace/ring_buffer.c')
-rw-r--r--kernel/trace/ring_buffer.c24
1 files changed, 4 insertions, 20 deletions
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index 7c56c3d06943..35825a87d6a3 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -1693,22 +1693,14 @@ int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size,
1693 if (!cpu_buffer->nr_pages_to_update) 1693 if (!cpu_buffer->nr_pages_to_update)
1694 continue; 1694 continue;
1695 1695
1696 /* The update must run on the CPU that is being updated. */ 1696 /* Can't run something on an offline CPU. */
1697 preempt_disable(); 1697 if (!cpu_online(cpu)) {
1698 if (cpu == smp_processor_id() || !cpu_online(cpu)) {
1699 rb_update_pages(cpu_buffer); 1698 rb_update_pages(cpu_buffer);
1700 cpu_buffer->nr_pages_to_update = 0; 1699 cpu_buffer->nr_pages_to_update = 0;
1701 } else { 1700 } else {
1702 /*
1703 * Can not disable preemption for schedule_work_on()
1704 * on PREEMPT_RT.
1705 */
1706 preempt_enable();
1707 schedule_work_on(cpu, 1701 schedule_work_on(cpu,
1708 &cpu_buffer->update_pages_work); 1702 &cpu_buffer->update_pages_work);
1709 preempt_disable();
1710 } 1703 }
1711 preempt_enable();
1712 } 1704 }
1713 1705
1714 /* wait for all the updates to complete */ 1706 /* wait for all the updates to complete */
@@ -1746,22 +1738,14 @@ int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size,
1746 1738
1747 get_online_cpus(); 1739 get_online_cpus();
1748 1740
1749 preempt_disable(); 1741 /* Can't run something on an offline CPU. */
1750 /* The update must run on the CPU that is being updated. */ 1742 if (!cpu_online(cpu_id))
1751 if (cpu_id == smp_processor_id() || !cpu_online(cpu_id))
1752 rb_update_pages(cpu_buffer); 1743 rb_update_pages(cpu_buffer);
1753 else { 1744 else {
1754 /*
1755 * Can not disable preemption for schedule_work_on()
1756 * on PREEMPT_RT.
1757 */
1758 preempt_enable();
1759 schedule_work_on(cpu_id, 1745 schedule_work_on(cpu_id,
1760 &cpu_buffer->update_pages_work); 1746 &cpu_buffer->update_pages_work);
1761 wait_for_completion(&cpu_buffer->update_done); 1747 wait_for_completion(&cpu_buffer->update_done);
1762 preempt_disable();
1763 } 1748 }
1764 preempt_enable();
1765 1749
1766 cpu_buffer->nr_pages_to_update = 0; 1750 cpu_buffer->nr_pages_to_update = 0;
1767 put_online_cpus(); 1751 put_online_cpus();