Diffstat (limited to 'kernel/trace/ring_buffer.c')
-rw-r--r--  kernel/trace/ring_buffer.c  62
1 file changed, 45 insertions, 17 deletions
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index 9ab18995ff1e..0cddf60186da 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -2534,29 +2534,59 @@ rb_wakeups(struct ring_buffer *buffer, struct ring_buffer_per_cpu *cpu_buffer)
  * The lock and unlock are done within a preempt disable section.
  * The current_context per_cpu variable can only be modified
  * by the current task between lock and unlock. But it can
- * be modified more than once via an interrupt. There are four
- * different contexts that we need to consider.
+ * be modified more than once via an interrupt. To pass this
+ * information from the lock to the unlock without having to
+ * access the 'in_interrupt()' functions again (which do show
+ * a bit of overhead in something as critical as function tracing),
+ * we use a bitmask trick.
  *
- *  Normal context.
- *  SoftIRQ context
- *  IRQ context
- *  NMI context
+ *  bit 0 = NMI context
+ *  bit 1 = IRQ context
+ *  bit 2 = SoftIRQ context
+ *  bit 3 = normal context.
  *
- * If for some reason the ring buffer starts to recurse, we
- * only allow that to happen at most 4 times (one for each
- * context). If it happens 5 times, then we consider this a
- * recusive loop and do not let it go further.
+ * This works because this is the order of contexts that can
+ * preempt other contexts. A SoftIRQ never preempts an IRQ
+ * context.
+ *
+ * When the context is determined, the corresponding bit is
+ * checked and set (if it was set, then a recursion of that context
+ * happened).
+ *
+ * On unlock, we need to clear this bit. To do so, just subtract
+ * 1 from the current_context and AND it to itself.
+ *
+ * (binary)
+ *  101 - 1 = 100
+ *  101 & 100 = 100 (clearing bit zero)
+ *
+ *  1010 - 1 = 1001
+ *  1010 & 1001 = 1000 (clearing bit 1)
+ *
+ * The least significant bit can be cleared this way, and it
+ * just so happens that it is the same bit corresponding to
+ * the current context.
  */
 
 static __always_inline int
 trace_recursive_lock(struct ring_buffer_per_cpu *cpu_buffer)
 {
-	if (cpu_buffer->current_context >= 4)
+	unsigned int val = cpu_buffer->current_context;
+	unsigned long pc = preempt_count();
+	int bit;
+
+	if (!(pc & (NMI_MASK | HARDIRQ_MASK | SOFTIRQ_OFFSET)))
+		bit = RB_CTX_NORMAL;
+	else
+		bit = pc & NMI_MASK ? RB_CTX_NMI :
+			pc & HARDIRQ_MASK ? RB_CTX_IRQ :
+			RB_CTX_SOFTIRQ;
+
+	if (unlikely(val & (1 << bit)))
 		return 1;
 
-	cpu_buffer->current_context++;
-	/* Interrupts must see this update */
-	barrier();
+	val |= (1 << bit);
+	cpu_buffer->current_context = val;
 
 	return 0;
 }
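
As an aside on the trace_recursive_lock() hunk above, here is a minimal userspace sketch of the same bitmask scheme, assuming the RB_CTX_* enum follows the bit assignments listed in the new comment (bit 0 = NMI ... bit 3 = normal). current_context is a plain global here rather than the per-CPU buffer field, and the context bit is passed in by the caller, since preempt_count() and the NMI/HARDIRQ/SOFTIRQ masks are not available outside the kernel.

#include <stdio.h>

/* Bit assignments from the comment in the patch: lower bit = higher-priority context. */
enum { RB_CTX_NMI, RB_CTX_IRQ, RB_CTX_SOFTIRQ, RB_CTX_NORMAL };

static unsigned int current_context;

/* Returns 1 if this context is already inside the ring buffer (recursion). */
static int recursive_lock(int bit)
{
	if (current_context & (1U << bit))
		return 1;
	current_context |= 1U << bit;
	return 0;
}

/* Clears the lowest set bit, which belongs to the innermost (current) context. */
static void recursive_unlock(void)
{
	current_context &= current_context - 1;
}

int main(void)
{
	printf("normal lock: %d\n", recursive_lock(RB_CTX_NORMAL)); /* 0: bit 3 set */
	printf("irq lock:    %d\n", recursive_lock(RB_CTX_IRQ));    /* 0: bit 1 set, nests on top */
	printf("irq re-lock: %d\n", recursive_lock(RB_CTX_IRQ));    /* 1: recursion detected */
	recursive_unlock(); /* clears bit 1 (the IRQ exits first) */
	recursive_unlock(); /* clears bit 3 (normal context) */
	printf("context:     %#x\n", current_context);              /* 0 */
	return 0;
}

Because interrupts unwind in reverse nesting order, the lowest set bit at unlock time is always the bit of the context that is currently leaving, which is why the subtract-and-AND trick in trace_recursive_unlock() is sufficient.
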
@@ -2564,9 +2594,7 @@ trace_recursive_lock(struct ring_buffer_per_cpu *cpu_buffer)
 static __always_inline void
 trace_recursive_unlock(struct ring_buffer_per_cpu *cpu_buffer)
 {
-	/* Don't let the dec leak out */
-	barrier();
-	cpu_buffer->current_context--;
+	cpu_buffer->current_context &= cpu_buffer->current_context - 1;
 }
 
 /**
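
The "(binary)" walkthrough in the new comment is easy to sanity-check outside the kernel; this throwaway snippet just evaluates v & (v - 1) for the two example values used there.

#include <stdio.h>

int main(void)
{
	unsigned int vals[] = { 0x5 /* 101 */, 0xa /* 1010 */ };

	for (int i = 0; i < 2; i++) {
		unsigned int v = vals[i];
		/* v & (v - 1) clears only the least significant set bit. */
		printf("%#x & %#x = %#x\n", v, v - 1, v & (v - 1));
	}
	return 0;
}

The output (0x5 & 0x4 = 0x4, 0xa & 0x9 = 0x8) matches the 101 -> 100 and 1010 -> 1000 examples in the comment.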