Diffstat (limited to 'kernel/trace/ring_buffer.c')
 -rw-r--r--  kernel/trace/ring_buffer.c | 90
 1 file changed, 63 insertions(+), 27 deletions(-)
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index ce8514feedcd..13950d9027cb 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -3,8 +3,10 @@
  *
  * Copyright (C) 2008 Steven Rostedt <srostedt@redhat.com>
  */
+#include <linux/ftrace_event.h>
 #include <linux/ring_buffer.h>
 #include <linux/trace_clock.h>
+#include <linux/trace_seq.h>
 #include <linux/spinlock.h>
 #include <linux/debugfs.h>
 #include <linux/uaccess.h>
@@ -21,7 +23,6 @@
 #include <linux/fs.h>
 
 #include <asm/local.h>
-#include "trace.h"
 
 static void update_pages_handler(struct work_struct *work);
 
@@ -2432,41 +2433,76 @@ rb_reserve_next_event(struct ring_buffer *buffer,
 
 #ifdef CONFIG_TRACING
 
-#define TRACE_RECURSIVE_DEPTH 16
+/*
+ * The lock and unlock are done within a preempt disable section.
+ * The current_context per_cpu variable can only be modified
+ * by the current task between lock and unlock. But it can
+ * be modified more than once via an interrupt. To pass this
+ * information from the lock to the unlock without having to
+ * access the 'in_interrupt()' functions again (which do show
+ * a bit of overhead in something as critical as function tracing),
+ * we use a bitmask trick.
+ *
+ *  bit 0 = NMI context
+ *  bit 1 = IRQ context
+ *  bit 2 = SoftIRQ context
+ *  bit 3 = normal context.
+ *
+ * This works because this is the order of contexts that can
+ * preempt other contexts. A SoftIRQ never preempts an IRQ
+ * context.
+ *
+ * When the context is determined, the corresponding bit is
+ * checked and set (if it was set, then a recursion of that context
+ * happened).
+ *
+ * On unlock, we need to clear this bit. To do so, just subtract
+ * 1 from the current_context and AND it to itself.
+ *
+ * (binary)
+ *  101 - 1 = 100
+ *  101 & 100 = 100 (clearing bit zero)
+ *
+ *  1010 - 1 = 1001
+ *  1010 & 1001 = 1000 (clearing bit 1)
+ *
+ * The least significant bit can be cleared this way, and it
+ * just so happens that it is the same bit corresponding to
+ * the current context.
+ */
+static DEFINE_PER_CPU(unsigned int, current_context);
 
-/* Keep this code out of the fast path cache */
-static noinline void trace_recursive_fail(void)
+static __always_inline int trace_recursive_lock(void)
 {
-        /* Disable all tracing before we do anything else */
-        tracing_off_permanent();
+        unsigned int val = this_cpu_read(current_context);
+        int bit;
 
-        printk_once(KERN_WARNING "Tracing recursion: depth[%ld]:"
-                    "HC[%lu]:SC[%lu]:NMI[%lu]\n",
-                    trace_recursion_buffer(),
-                    hardirq_count() >> HARDIRQ_SHIFT,
-                    softirq_count() >> SOFTIRQ_SHIFT,
-                    in_nmi());
-
-        WARN_ON_ONCE(1);
-}
-
-static inline int trace_recursive_lock(void)
-{
-        trace_recursion_inc();
+        if (in_interrupt()) {
+                if (in_nmi())
+                        bit = 0;
+                else if (in_irq())
+                        bit = 1;
+                else
+                        bit = 2;
+        } else
+                bit = 3;
 
-        if (likely(trace_recursion_buffer() < TRACE_RECURSIVE_DEPTH))
-                return 0;
+        if (unlikely(val & (1 << bit)))
+                return 1;
 
-        trace_recursive_fail();
+        val |= (1 << bit);
+        this_cpu_write(current_context, val);
 
-        return -1;
+        return 0;
 }
 
-static inline void trace_recursive_unlock(void)
+static __always_inline void trace_recursive_unlock(void)
 {
-        WARN_ON_ONCE(!trace_recursion_buffer());
+        unsigned int val = this_cpu_read(current_context);
 
-        trace_recursion_dec();
+        val--;
+        val &= this_cpu_read(current_context);
+        this_cpu_write(current_context, val);
 }
 
 #else
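The bitmask scheme introduced above can be exercised outside the kernel. Below is a minimal userspace sketch of the same logic, not kernel code: the context is passed in explicitly instead of being derived from in_nmi()/in_irq()/in_interrupt(), the per-CPU variable becomes an ordinary global, and the demo_* names are invented for illustration. It shows a normal-context writer being preempted by an IRQ, the IRQ re-entering and being rejected, and the unlock clearing the lowest set bit exactly as in the "1010 & 1001" example in the comment.

/*
 * Illustrative userspace sketch, not kernel code. Bit numbers follow the
 * comment above: 0 = NMI, 1 = IRQ, 2 = SoftIRQ, 3 = normal.
 */
#include <stdio.h>

enum ctx { CTX_NMI = 0, CTX_IRQ = 1, CTX_SOFTIRQ = 2, CTX_NORMAL = 3 };

static unsigned int current_context;

/* Returns 1 when the same context is already inside the buffer. */
static int demo_recursive_lock(enum ctx bit)
{
        if (current_context & (1u << bit))
                return 1;                       /* recursion detected */
        current_context |= 1u << bit;           /* mark context active */
        return 0;
}

/* Clears the lowest set bit, i.e. the most recently entered context. */
static void demo_recursive_unlock(void)
{
        current_context &= current_context - 1;
}

int main(void)
{
        /* Normal context enters, then an IRQ preempts it: both succeed. */
        printf("normal lock: %d\n", demo_recursive_lock(CTX_NORMAL)); /* 0 */
        printf("irq lock:    %d\n", demo_recursive_lock(CTX_IRQ));    /* 0 */

        /* The IRQ handler re-entering the buffer is caught. */
        printf("irq relock:  %d\n", demo_recursive_lock(CTX_IRQ));    /* 1 */

        /* Unlock in nesting order: 0b1010 -> 0b1000 -> 0b0000. */
        demo_recursive_unlock();
        demo_recursive_unlock();
        printf("final mask:  %#x\n", current_context);                /* 0 */
        return 0;
}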
@@ -3425,7 +3461,7 @@ static void rb_advance_iter(struct ring_buffer_iter *iter)
         /* check for end of page padding */
         if ((iter->head >= rb_page_size(iter->head_page)) &&
             (iter->head_page != cpu_buffer->commit_page))
-                rb_advance_iter(iter);
+                rb_inc_iter(iter);
 }
 
 static int rb_lost_events(struct ring_buffer_per_cpu *cpu_buffer)
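The last hunk replaces a call of rb_advance_iter() into itself with a direct call to rb_inc_iter() when the read head runs into end-of-page padding, so the iterator simply steps to the next page instead of re-entering the whole advance routine. The sketch below is a toy model of that behaviour under the assumption that advancing means moving one entry and hopping pages once the current page is exhausted; the demo_* names and the fixed page geometry are invented and do not reflect the real buffer-page layout or its commit tracking.

/*
 * Minimal userspace model of the rb_advance_iter() change: on hitting
 * end-of-page padding the iterator steps straight to the next page
 * (the rb_inc_iter() analogue) rather than calling the full advance
 * routine again. All names here are invented for the sketch.
 */
#include <stdio.h>

#define NPAGES  3
#define PAGESZ  4       /* entries written to each page */

struct demo_iter {
        int page;       /* index of the page being read */
        int head;       /* read position inside that page */
};

/* Analogue of rb_inc_iter(): jump to the start of the next page. */
static void demo_inc_iter(struct demo_iter *it)
{
        it->page++;
        it->head = 0;
}

/* Analogue of rb_advance_iter(): step one entry, then handle padding. */
static void demo_advance_iter(struct demo_iter *it)
{
        it->head++;

        /*
         * End-of-page padding reached and more pages remain (a stand-in
         * for the commit_page check): step pages directly, no recursion.
         */
        if (it->head >= PAGESZ && it->page < NPAGES - 1)
                demo_inc_iter(it);
}

int main(void)
{
        struct demo_iter it = { .page = 0, .head = 0 };

        for (int i = 0; i < 10; i++) {
                demo_advance_iter(&it);
                printf("step %2d -> page %d, head %d\n", i, it.page, it.head);
        }
        return 0;
}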