 include/linux/ftrace.h     |  7 +++++++
 include/linux/init_task.h  |  1 +
 include/linux/sched.h      |  4 +++-
 kernel/trace/ring_buffer.c | 42 ++++++++++++++++++++++++++++++++++++++++++
 4 files changed, 53 insertions(+), 1 deletion(-)
diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h
index 97c83e1bc589..39b95c56587e 100644
--- a/include/linux/ftrace.h
+++ b/include/linux/ftrace.h
@@ -488,8 +488,15 @@ static inline int test_tsk_trace_graph(struct task_struct *tsk)
 
 extern int ftrace_dump_on_oops;
 
+#ifdef CONFIG_PREEMPT
+#define INIT_TRACE_RECURSION		.trace_recursion = 0,
+#endif
+
 #endif /* CONFIG_TRACING */
 
+#ifndef INIT_TRACE_RECURSION
+#define INIT_TRACE_RECURSION
+#endif
 
 #ifdef CONFIG_HW_BRANCH_TRACER
 
diff --git a/include/linux/init_task.h b/include/linux/init_task.h
index dcfb93337e9a..6fc218529863 100644
--- a/include/linux/init_task.h
+++ b/include/linux/init_task.h
@@ -187,6 +187,7 @@ extern struct cred init_cred;
 	INIT_TRACE_IRQFLAGS						\
 	INIT_LOCKDEP							\
 	INIT_FTRACE_GRAPH						\
+	INIT_TRACE_RECURSION						\
 }
 
 
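The two hunks above pair a config-dependent designated initializer with an empty fallback, so INIT_TASK compiles whether or not task_struct carries the new field. A minimal user-space sketch of that pattern; FEATURE_X, feature_field, and demo_task are hypothetical names for illustration, not anything from this patch:

#include <stdio.h>

#define FEATURE_X 1			/* comment out to drop the field */

struct demo_task {
	int pid;
#ifdef FEATURE_X
	unsigned long feature_field;
#endif
};

#ifdef FEATURE_X
#define INIT_FEATURE_X	.feature_field = 0,
#endif

/* Fallback: expand to nothing so the aggregate initializer still compiles */
#ifndef INIT_FEATURE_X
#define INIT_FEATURE_X
#endif

#define INIT_DEMO_TASK { .pid = 1, INIT_FEATURE_X }

int main(void)
{
	struct demo_task t = INIT_DEMO_TASK;
	printf("pid=%d\n", t.pid);
	return 0;
}

With FEATURE_X undefined, INIT_FEATURE_X expands to nothing and the initializer list is unchanged, which is how INIT_TRACE_RECURSION degrades when the field is not configured in.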
diff --git a/include/linux/sched.h b/include/linux/sched.h
index b4c38bc8049c..7ede5e490913 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1428,7 +1428,9 @@ struct task_struct {
 #ifdef CONFIG_TRACING
 	/* state flags for use by tracers */
 	unsigned long trace;
-#endif
+	/* bitmask of trace recursion */
+	unsigned long trace_recursion;
+#endif /* CONFIG_TRACING */
 };
 
 /* Future-safe accessor for struct task_struct's cpus_allowed. */
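trace_recursion gives each task a bitmask with one bit per trace context level, so re-entry at the same level is caught while legitimate nesting across levels (say, a hardirq firing while process-context tracing is active) still proceeds. A standalone sketch of that idea; the level numbering (0 = process, 1 = softirq, 2 = hardirq, 3 = NMI) is an assumption for illustration, not taken from this patch:

#include <assert.h>

enum ctx_level { CTX_PROCESS, CTX_SOFTIRQ, CTX_HARDIRQ, CTX_NMI };

static unsigned long recursion;		/* stands in for current->trace_recursion */

static int recursion_try_lock(int level)
{
	if (recursion & (1UL << level))
		return -1;			/* same-level re-entry */
	recursion |= 1UL << level;
	return 0;
}

static void recursion_unlock(int level)
{
	recursion &= ~(1UL << level);
}

int main(void)
{
	assert(recursion_try_lock(CTX_PROCESS) == 0);
	assert(recursion_try_lock(CTX_HARDIRQ) == 0);	/* nesting across levels is fine */
	assert(recursion_try_lock(CTX_PROCESS) == -1);	/* same-level recursion trips */
	recursion_unlock(CTX_HARDIRQ);
	recursion_unlock(CTX_PROCESS);
	return 0;
}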
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index 84a6055f37c9..b421b0ea9112 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -1481,6 +1481,40 @@ rb_reserve_next_event(struct ring_buffer_per_cpu *cpu_buffer,
 	return event;
 }
 
+static int trace_irq_level(void)
+{
+	return hardirq_count() + softirq_count() + in_nmi();
+}
+
+static int trace_recursive_lock(void)
+{
+	int level;
+
+	level = trace_irq_level();
+
+	if (unlikely(current->trace_recursion & (1 << level))) {
+		/* Disable all tracing before we do anything else */
+		tracing_off_permanent();
+		WARN_ON_ONCE(1);
+		return -1;
+	}
+
+	current->trace_recursion |= 1 << level;
+
+	return 0;
+}
+
+static void trace_recursive_unlock(void)
+{
+	int level;
+
+	level = trace_irq_level();
+
+	WARN_ON_ONCE(!(current->trace_recursion & (1 << level)));
+
+	current->trace_recursion &= ~(1 << level);
+}
+
 static DEFINE_PER_CPU(int, rb_need_resched);
 
 /**
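trace_recursive_lock() is not a lock in the blocking sense: it sets the current context's bit and fails if that bit is already set, and because anything that nests on the same CPU runs at a different level, the pair needs no atomics. The level itself is derived from preempt_count() state; a user-space approximation, assuming simple boolean context flags where the kernel's hardirq_count()/softirq_count() actually return offset-scaled counters, might look like:

#include <stdio.h>

/* Hypothetical stand-ins for the kernel's context state */
static int nmi_flag, hardirq_flag, softirq_flag;

static int trace_irq_level_demo(void)
{
	/* 0 = process, 1 = softirq, 2 = hardirq, 3 = NMI (assumed numbering) */
	if (nmi_flag)
		return 3;
	if (hardirq_flag)
		return 2;
	if (softirq_flag)
		return 1;
	return 0;
}

int main(void)
{
	printf("process context -> level %d\n", trace_irq_level_demo());
	hardirq_flag = 1;
	printf("hardirq context -> level %d\n", trace_irq_level_demo());
	return 0;
}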
@@ -1514,6 +1548,9 @@ ring_buffer_lock_reserve(struct ring_buffer *buffer, unsigned long length)
 	/* If we are tracing schedule, we don't want to recurse */
 	resched = ftrace_preempt_disable();
 
+	if (trace_recursive_lock())
+		goto out_nocheck;
+
 	cpu = raw_smp_processor_id();
 
 	if (!cpumask_test_cpu(cpu, buffer->cpumask))
@@ -1543,6 +1580,9 @@ ring_buffer_lock_reserve(struct ring_buffer *buffer, unsigned long length)
 	return event;
 
  out:
+	trace_recursive_unlock();
+
+ out_nocheck:
 	ftrace_preempt_enable(resched);
 	return NULL;
 }
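The out/out_nocheck split is the usual C goto-unwind for partially acquired state: preemption is disabled before the recursion check, so a recursion failure must skip trace_recursive_unlock() but still re-enable preemption. A compilable sketch of that shape, with hypothetical helpers standing in for ftrace_preempt_disable() and trace_recursive_lock():

#include <stdio.h>

/* Hypothetical stand-ins: acquire_a/release_a model preemption disabling,
 * try_lock_b/unlock_b model the recursion check. */
static int b_held;

static void acquire_a(void) { }
static void release_a(void) { }

static int try_lock_b(void)
{
	if (b_held)
		return -1;
	b_held = 1;
	return 0;
}

static void unlock_b(void) { b_held = 0; }

static void *do_work(void) { return NULL; /* simulate failure */ }

static void *reserve(void)
{
	void *ret;

	acquire_a();			/* taken unconditionally, like preempt disabling */

	if (try_lock_b())
		goto out_nocheck;	/* b was never taken: skip its unlock */

	ret = do_work();
	if (!ret)
		goto out;		/* failure with b held: release both */

	return ret;			/* success keeps b held until the commit side */

 out:
	unlock_b();
 out_nocheck:
	release_a();
	return NULL;
}

int main(void)
{
	if (!reserve())
		printf("reserve failed, state unwound\n");
	return 0;
}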
@@ -1581,6 +1621,8 @@ int ring_buffer_unlock_commit(struct ring_buffer *buffer,
 
 	rb_commit(cpu_buffer, event);
 
+	trace_recursive_unlock();
+
 	/*
 	 * Only the last preempt count needs to restore preemption.
 	 */
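The net effect is that the recursion bit taken in ring_buffer_lock_reserve() stays set across the whole reserve..commit window and is only dropped in ring_buffer_unlock_commit(). A hedged sketch of the resulting caller contract, kernel context assumed; my_event and write_sample are hypothetical, while ring_buffer_event_data() is the existing event accessor:

#include <linux/ring_buffer.h>

struct my_event {
	unsigned long val;
};

static void write_sample(struct ring_buffer *buffer, unsigned long val)
{
	struct ring_buffer_event *event;
	struct my_event *entry;

	event = ring_buffer_lock_reserve(buffer, sizeof(*entry));
	if (!event)
		return;		/* recursion detected, or buffer unavailable */

	entry = ring_buffer_event_data(event);
	entry->val = val;

	/* commit also clears this context level's recursion bit */
	ring_buffer_unlock_commit(buffer, event);
}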