author     David Miller <davem@davemloft.net>    2010-04-20 18:47:11 -0400
committer  Steven Rostedt <rostedt@goodmis.org>  2010-04-27 13:06:35 -0400
commit     72c9ddfd4c5bf54ef03cfdf57026416cb678eeba
tree       bd2c2b6b411975a8219d7138ba7699ee5d324e77 /kernel/trace/trace.c
parent     62b915f1060996a8e1f69be50e3b8e9e43b710cb
ring-buffer: Make non-consuming read less expensive with lots of cpus.
When performing a non-consuming read, a synchronize_sched() is
performed once for every cpu which is actively tracing.
This is very expensive, and can make it take several seconds to open
up the 'trace' file with lots of cpus.
Only one synchronize_sched() call is actually necessary. What is
desired is for all cpus to see the disabling state change. So we
transform the existing sequence:
for_each_cpu() {
ring_buffer_read_start();
}
where each ring_buffer_read_start() call performs a synchronize_sched(),
into the following:
for_each_cpu() {
ring_buffer_read_prepare();
}
ring_buffer_read_prepare_sync();
for_each_cpu() {
ring_buffer_read_start();
}
wherein only the single ring_buffer_read_prepare_sync() call needs to
do the synchronize_sched().
The first phase, via ring_buffer_read_prepare(), allocates the 'iter'
memory and increments ->record_disabled.
In the second phase, ring_buffer_read_prepare_sync() makes sure this
->record_disabled state is fully visible to all cpus.
And in the third and final phase, the ring_buffer_read_start() calls
reset the 'iter' objects allocated in the first phase, since we now
know that none of the cpus are adding trace entries any more.
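The ring-buffer side of this split lives in kernel/trace/ring_buffer.c
and so does not appear in the trace.c diffstat below. What follows is a
minimal sketch of the three phases, assuming the existing per-cpu
buffer internals (cpu_buffer, record_disabled, rb_iter_reset), not a
verbatim copy of the implementation:

	/* Phase 1: allocate the iterator and bump ->record_disabled
	 * for this cpu; no waiting happens here. */
	struct ring_buffer_iter *
	ring_buffer_read_prepare(struct ring_buffer *buffer, int cpu)
	{
		struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
		struct ring_buffer_iter *iter;

		iter = kmalloc(sizeof(*iter), GFP_KERNEL);
		if (!iter)
			return NULL;

		iter->cpu_buffer = cpu_buffer;
		atomic_inc(&cpu_buffer->record_disabled);

		return iter;
	}

	/* Phase 2: a single synchronize_sched() makes every
	 * ->record_disabled increment from phase 1 visible to all cpus. */
	void ring_buffer_read_prepare_sync(void)
	{
		synchronize_sched();
	}

	/* Phase 3: with no cpu still writing, it is safe to reset the
	 * iterator to the start of the buffer. */
	void ring_buffer_read_start(struct ring_buffer_iter *iter)
	{
		struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
		unsigned long flags;

		spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
		rb_iter_reset(iter);
		spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
	}

The caller-side pattern is then exactly the two-loop sequence shown in
the trace.c diff below.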
This makes opening the 'trace' file nearly instantaneous on a
sparc64 Niagara2 box with 128 cpus tracing.
Signed-off-by: David S. Miller <davem@davemloft.net>
LKML-Reference: <20100420.154711.11246950.davem@davemloft.net>
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
Diffstat (limited to 'kernel/trace/trace.c')

 kernel/trace/trace.c | 11 +++++++++---
 1 file changed, 8 insertions(+), 3 deletions(-)
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 8b9ba41ec146..756d7283318b 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -2201,15 +2201,20 @@ __tracing_open(struct inode *inode, struct file *file)
 
 	if (iter->cpu_file == TRACE_PIPE_ALL_CPU) {
 		for_each_tracing_cpu(cpu) {
-
 			iter->buffer_iter[cpu] =
-				ring_buffer_read_start(iter->tr->buffer, cpu);
+				ring_buffer_read_prepare(iter->tr->buffer, cpu);
+		}
+		ring_buffer_read_prepare_sync();
+		for_each_tracing_cpu(cpu) {
+			ring_buffer_read_start(iter->buffer_iter[cpu]);
 			tracing_iter_reset(iter, cpu);
 		}
 	} else {
 		cpu = iter->cpu_file;
 		iter->buffer_iter[cpu] =
-			ring_buffer_read_start(iter->tr->buffer, cpu);
+			ring_buffer_read_prepare(iter->tr->buffer, cpu);
+		ring_buffer_read_prepare_sync();
+		ring_buffer_read_start(iter->buffer_iter[cpu]);
 		tracing_iter_reset(iter, cpu);
 	}
 