| author | Steven Rostedt (Red Hat) <srostedt@redhat.com> | 2013-03-07 09:27:42 -0500 |
|---|---|---|
| committer | Steven Rostedt <rostedt@goodmis.org> | 2013-03-15 00:35:52 -0400 |
| commit | f5eb5588262cab7232ed1d77cf612b327db50767 (patch) | |
| tree | 77ccc8e85f9be6af5cce5e689b6766a1fadf613b /kernel/trace/ring_buffer.c | |
| parent | ad909e21bbe69f1d39055d346540abd827190eca (diff) | |
ring-buffer: Do not use schedule_work_on() for current CPU
Ring buffer updates that are done while the ring buffer is active
need to be completed on the CPU that owns that ring buffer's
per_cpu buffer. To accomplish this, schedule_work_on() is used to
schedule the work on that CPU.
But there is no reason to use schedule_work_on() when the process
doing the update already happens to be running on the CPU it is
updating; the requirement is already met. Instead, just do the work
directly and continue (see the sketch below).
This is needed for tracing_snapshot_alloc(), which may be called
very early in boot, before the work queues have been set up.
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
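
To make the pattern concrete, here is a minimal sketch of what the patch does, written as standalone kernel-style C. run_update_on(), do_update(), and update_work are hypothetical names invented for illustration; the real code operates on cpu_buffer->update_pages_work and rb_update_pages(), as the diff below shows.

```c
#include <linux/cpu.h>
#include <linux/preempt.h>
#include <linux/smp.h>
#include <linux/workqueue.h>

static struct work_struct update_work;	/* assumed initialized with INIT_WORK() elsewhere */

static void do_update(int cpu);		/* hypothetical per-CPU update helper */

static void run_update_on(int cpu)
{
	/* Pin this task so smp_processor_id() stays stable. */
	preempt_disable();
	if (cpu == smp_processor_id() || !cpu_online(cpu)) {
		/* Already on the target CPU (or it is offline): do the work here. */
		do_update(cpu);
	} else {
		/*
		 * schedule_work_on() may sleep on PREEMPT_RT, so it must
		 * not be called with preemption disabled.
		 */
		preempt_enable();
		schedule_work_on(cpu, &update_work);
		preempt_disable();
	}
	preempt_enable();
}
```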
Diffstat (limited to 'kernel/trace/ring_buffer.c')
-rw-r--r-- | kernel/trace/ring_buffer.c | 33 |
1 file changed, 27 insertions(+), 6 deletions(-)
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index 65fe2a4f9824..d1c85c5f5f51 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -1679,11 +1679,22 @@ int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size,
 			if (!cpu_buffer->nr_pages_to_update)
 				continue;
 
-			if (cpu_online(cpu))
+			/* The update must run on the CPU that is being updated. */
+			preempt_disable();
+			if (cpu == smp_processor_id() || !cpu_online(cpu)) {
+				rb_update_pages(cpu_buffer);
+				cpu_buffer->nr_pages_to_update = 0;
+			} else {
+				/*
+				 * Can not disable preemption for schedule_work_on()
+				 * on PREEMPT_RT.
+				 */
+				preempt_enable();
 				schedule_work_on(cpu,
 						&cpu_buffer->update_pages_work);
+				preempt_disable();
+			}
+			preempt_enable();
 		}
 
 		/* wait for all the updates to complete */
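
For context, the work item being scheduled here runs a per-CPU handler that performs the same page update and then signals the completion the resize path waits on. The diff does not show that handler; the sketch below is a reconstruction based on the update_pages_handler() found in ring_buffer.c of this era, not part of the patch itself.

```c
/*
 * Sketch of the worker side that the diff schedules onto the remote
 * CPU. Field and function names are taken from the diff, but this
 * block is reconstructed context, not part of the patch.
 */
static void update_pages_handler(struct work_struct *work)
{
	struct ring_buffer_per_cpu *cpu_buffer = container_of(work,
			struct ring_buffer_per_cpu, update_pages_work);

	rb_update_pages(cpu_buffer);
	complete(&cpu_buffer->update_done);
}
```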
@@ -1721,12 +1732,22 @@ int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size,
 
 		get_online_cpus();
 
-		if (cpu_online(cpu_id)) {
+		preempt_disable();
+		/* The update must run on the CPU that is being updated. */
+		if (cpu_id == smp_processor_id() || !cpu_online(cpu_id))
+			rb_update_pages(cpu_buffer);
+		else {
+			/*
+			 * Can not disable preemption for schedule_work_on()
+			 * on PREEMPT_RT.
+			 */
+			preempt_enable();
 			schedule_work_on(cpu_id,
 					 &cpu_buffer->update_pages_work);
 			wait_for_completion(&cpu_buffer->update_done);
-		} else
-			rb_update_pages(cpu_buffer);
+			preempt_disable();
+		}
+		preempt_enable();
 
 		cpu_buffer->nr_pages_to_update = 0;
 		put_online_cpus();
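
Two design notes on the hunks above. The preempt_enable()/preempt_disable() dance around schedule_work_on() exists because, as the in-line comment says, schedule_work_on() cannot be called with preemption disabled on PREEMPT_RT (it may sleep there); re-disabling preemption inside the else branch keeps both branches symmetric, so the code falls through to a single preempt_enable(). Also, wait_for_completion() appears only in this single-CPU path; the all-CPUs loop in the first hunk defers waiting to its separate "wait for all the updates to complete" step.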