Diffstat (limited to 'kernel/trace/ring_buffer.c')
 kernel/trace/ring_buffer.c | 42 ++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 42 insertions(+), 0 deletions(-)
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index 84a6055f37c9..b421b0ea9112 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -1481,6 +1481,40 @@ rb_reserve_next_event(struct ring_buffer_per_cpu *cpu_buffer,
 	return event;
 }
 
+static int trace_irq_level(void)
+{
+	return (hardirq_count() >> HARDIRQ_SHIFT) + (softirq_count() >> SOFTIRQ_SHIFT) + !!in_nmi();
+}
+
+static int trace_recursive_lock(void)
+{
+	int level;
+
+	level = trace_irq_level();
+
+	if (unlikely(current->trace_recursion & (1 << level))) {
+		/* Disable all tracing before we do anything else */
+		tracing_off_permanent();
+		WARN_ON_ONCE(1);
+		return -1;
+	}
+
+	current->trace_recursion |= 1 << level;
+
+	return 0;
+}
+
+static void trace_recursive_unlock(void)
+{
+	int level;
+
+	level = trace_irq_level();
+
+	WARN_ON_ONCE(!(current->trace_recursion & (1 << level)));
+
+	current->trace_recursion &= ~(1 << level);
+}
+
 static DEFINE_PER_CPU(int, rb_need_resched);
 
 /**
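
The hunk above is the core of the change: each task gets one bit in
current->trace_recursion per interrupt context (normal, softirq, hardirq, NMI).
A writer that re-enters the ring buffer at the same level is refused, while a
hardirq or NMI remains free to nest inside a lower-level write. Note that
hardirq_count() and softirq_count() return the raw masked fields of the preempt
count, hence the shifts in trace_irq_level() to turn them into small bit
indices. A minimal user-space sketch of the same bit-per-level idea (all names
here are hypothetical; the kernel derives the level from the preempt count
rather than taking it as an argument):

	#include <assert.h>
	#include <stdio.h>

	/* Hypothetical stand-in for current->trace_recursion: one bit per level. */
	static unsigned long trace_recursion;

	enum ctx_level { CTX_NORMAL, CTX_SOFTIRQ, CTX_HARDIRQ, CTX_NMI };

	static int recursive_lock(enum ctx_level level)
	{
		if (trace_recursion & (1UL << level))
			return -1;		/* same-level re-entry: refuse */
		trace_recursion |= 1UL << level;
		return 0;
	}

	static void recursive_unlock(enum ctx_level level)
	{
		assert(trace_recursion & (1UL << level));
		trace_recursion &= ~(1UL << level);
	}

	int main(void)
	{
		assert(recursive_lock(CTX_NORMAL) == 0);	/* first entry: ok */
		assert(recursive_lock(CTX_HARDIRQ) == 0);	/* interrupt may nest */
		assert(recursive_lock(CTX_NORMAL) == -1);	/* recursion caught */
		recursive_unlock(CTX_HARDIRQ);
		recursive_unlock(CTX_NORMAL);
		printf("same-level recursion refused, cross-level nesting allowed\n");
		return 0;
	}
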
@@ -1514,6 +1548,9 @@ ring_buffer_lock_reserve(struct ring_buffer *buffer, unsigned long length)
 	/* If we are tracing schedule, we don't want to recurse */
 	resched = ftrace_preempt_disable();
 
+	if (trace_recursive_lock())
+		goto out_nocheck;
+
 	cpu = raw_smp_processor_id();
 
 	if (!cpumask_test_cpu(cpu, buffer->cpumask))
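
From a writer's point of view the recursion check is invisible: a refused
reservation simply looks like a full buffer, i.e. ring_buffer_lock_reserve()
returns NULL. A sketch of a caller under that contract (kernel-style code
against the 2009-era API shown in this diff; my_write_word() and its u32
payload are illustrative, not from this patch):

	#include <linux/ring_buffer.h>
	#include <linux/types.h>
	#include <linux/errno.h>

	/* Illustrative writer, not part of this patch. */
	static int my_write_word(struct ring_buffer *buffer, u32 val)
	{
		struct ring_buffer_event *event;
		u32 *payload;

		event = ring_buffer_lock_reserve(buffer, sizeof(*payload));
		if (!event)
			return -EBUSY;	/* buffer full, or recursion refused */

		payload = ring_buffer_event_data(event);
		*payload = val;

		/* commit also releases the recursion bit taken at reserve time */
		return ring_buffer_unlock_commit(buffer, event);
	}
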
@@ -1543,6 +1580,9 @@ ring_buffer_lock_reserve(struct ring_buffer *buffer, unsigned long length)
 	return event;
 
  out:
+	trace_recursive_unlock();
+
+ out_nocheck:
 	ftrace_preempt_enable(resched);
 	return NULL;
 }
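
The two labels implement the usual staged-unwind idiom: failures after
trace_recursive_lock() succeeded jump to out: and drop the recursion bit, while
a failed recursion check jumps straight to out_nocheck:, since at that point
only the earlier ftrace_preempt_disable() needs undoing. A self-contained
sketch of the pattern (hypothetical stand-in functions, not kernel code):

	#include <stdio.h>

	/* Hypothetical stand-ins for the two resources taken in order. */
	static int preempt_off, lock_held;

	static void disable_preempt(void) { preempt_off = 1; }
	static void enable_preempt(void)  { preempt_off = 0; }
	static int  take_lock(void)       { lock_held = 1; return 0; }
	static void drop_lock(void)       { lock_held = 0; }
	static int  do_work(void)         { return -1; /* force the failure path */ }

	static int reserve(void)
	{
		disable_preempt();		/* step 1 */

		if (take_lock())		/* step 2 */
			goto out_nolock;	/* step 2 failed: only undo step 1 */

		if (do_work())			/* step 3 */
			goto out;		/* step 3 failed: undo 2, then 1 */

		return 0;			/* success: held until a later "commit" */

	 out:
		drop_lock();
	 out_nolock:
		enable_preempt();
		return -1;
	}

	int main(void)
	{
		reserve();
		/* both counters are back to 0 after the forced failure */
		printf("preempt_off=%d lock_held=%d\n", preempt_off, lock_held);
		return 0;
	}
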
@@ -1581,6 +1621,8 @@ int ring_buffer_unlock_commit(struct ring_buffer *buffer,
 
 	rb_commit(cpu_buffer, event);
 
+	trace_recursive_unlock();
+
 	/*
 	 * Only the last preempt count needs to restore preemption.
 	 */
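
Note where this unlock lives: the bit taken in ring_buffer_lock_reserve() is
cleared only here, after rb_commit(), so the recursion "lock" is held across
the entire reserve, fill, commit window. If a writer reserved but never
committed, the next same-level reserve would trip the WARN_ON_ONCE() and
tracing_off_permanent() in trace_recursive_lock(). In terms of the hypothetical
user-space sketch after the first hunk, these lines (dropped into that sketch's
main()) show what the held window refuses:

	assert(recursive_lock(CTX_NORMAL) == 0);	/* reserve */
	/* ...tracer fires again at the same level while filling the event... */
	assert(recursive_lock(CTX_NORMAL) == -1);	/* refused until commit */
	recursive_unlock(CTX_NORMAL);			/* commit drops the bit */
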