author		Steven Rostedt <srostedt@redhat.com>	2010-10-19 13:17:08 -0400
committer	Steven Rostedt <rostedt@goodmis.org>	2010-10-20 15:17:57 -0400
commit		d9abde2138e0a00a0d7e44676928efa0ef629d48
tree		29b75afdf39664debd21c50af5d882856f7fa2a9 /kernel/trace
parent		140ff89127c74b1b1c1b0152a36ea3720ccf6bc3
ring-buffer: Micro-optimize with some strategic inlining
By using inline and noinline, we are able to make the fast path of
recording an event 4% faster.
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
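
The idea generalizes beyond the ring buffer: mark the hot path inline so it folds into its callers, and push the rarely executed failure handling into a separate noinline function so the cold code never shares instruction-cache lines with the hot code. Below is a minimal standalone sketch of the pattern, not the kernel's code: the names (recursion_lock, recursion_fail, MAX_DEPTH) are illustrative stand-ins, and it uses GCC/Clang's noinline attribute and __builtin_expect directly (which the kernel wraps as likely()).

#include <stdio.h>

/* Stand-in for current->trace_recursion. */
static int recursion_depth;
#define MAX_DEPTH 16

/* Cold path: deliberately kept out of line so the fast path stays compact. */
static __attribute__((noinline)) void recursion_fail(void)
{
	fprintf(stderr, "recursion limit hit (depth %d)\n", recursion_depth);
}

/* Hot path: small enough that inlining it into every caller is cheap. */
static inline int recursion_lock(void)
{
	recursion_depth++;

	if (__builtin_expect(recursion_depth < MAX_DEPTH, 1))
		return 0;	/* common case: nothing beyond the counter bump */

	recursion_fail();	/* rare case: jump to the out-of-line handler */
	return -1;
}

static inline void recursion_unlock(void)
{
	recursion_depth--;
}

int main(void)
{
	if (recursion_lock() == 0) {
		/* ... record an event ... */
		recursion_unlock();
	}
	return 0;
}

This split mirrors what the patch below does to trace_recursive_lock(): the counter increment and depth check stay inline, while the WARN and tracing_off_permanent() machinery moves into the noinline trace_recursive_fail().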
Diffstat (limited to 'kernel/trace')
 kernel/trace/ring_buffer.c | 23 +++++++++++++++--------
 1 file changed, 15 insertions(+), 8 deletions(-)
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index d9f3e7a82137..f5007d0d932d 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -2078,7 +2078,7 @@ static void rb_start_commit(struct ring_buffer_per_cpu *cpu_buffer)
 	local_inc(&cpu_buffer->commits);
 }
 
-static void rb_end_commit(struct ring_buffer_per_cpu *cpu_buffer)
+static inline void rb_end_commit(struct ring_buffer_per_cpu *cpu_buffer)
 {
 	unsigned long commits;
 
@@ -2193,13 +2193,9 @@ rb_reserve_next_event(struct ring_buffer *buffer,
 
 #define TRACE_RECURSIVE_DEPTH 16
 
-static int trace_recursive_lock(void)
+/* Keep this code out of the fast path cache */
+static noinline void trace_recursive_fail(void)
 {
-	current->trace_recursion++;
-
-	if (likely(current->trace_recursion < TRACE_RECURSIVE_DEPTH))
-		return 0;
-
 	/* Disable all tracing before we do anything else */
 	tracing_off_permanent();
 
@@ -2211,10 +2207,21 @@ static int trace_recursive_lock(void)
 		    in_nmi());
 
 	WARN_ON_ONCE(1);
+}
+
+static inline int trace_recursive_lock(void)
+{
+	current->trace_recursion++;
+
+	if (likely(current->trace_recursion < TRACE_RECURSIVE_DEPTH))
+		return 0;
+
+	trace_recursive_fail();
+
 	return -1;
 }
 
-static void trace_recursive_unlock(void)
+static inline void trace_recursive_unlock(void)
 {
 	WARN_ON_ONCE(!current->trace_recursion);
 