-rw-r--r--  arch/x86/kernel/ftrace.c   |  4
-rw-r--r--  include/linux/ftrace_irq.h |  8
-rw-r--r--  kernel/trace/ring_buffer.c | 43
3 files changed, 15 insertions(+), 40 deletions(-)
diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
index 918073c6681b..d74d75e0952d 100644
--- a/arch/x86/kernel/ftrace.c
+++ b/arch/x86/kernel/ftrace.c
@@ -113,7 +113,7 @@ static void ftrace_mod_code(void)
 			 MCOUNT_INSN_SIZE);
 }
 
-void arch_ftrace_nmi_enter(void)
+void ftrace_nmi_enter(void)
 {
 	atomic_inc(&nmi_running);
 	/* Must have nmi_running seen before reading write flag */
@@ -124,7 +124,7 @@ void arch_ftrace_nmi_enter(void)
 	}
 }
 
-void arch_ftrace_nmi_exit(void)
+void ftrace_nmi_exit(void)
 {
 	/* Finish all executions before clearing nmi_running */
 	smp_wmb();
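
With the arch_ prefix dropped, these x86 handlers are now the ftrace_nmi_enter()/ftrace_nmi_exit() entry points themselves rather than callees of a generic wrapper. As a rough illustration of the handshake they implement, here is a minimal user-space model using C11 atomics in place of the kernel's atomic_t and smp_mb()/smp_wmb(); the names mirror the diff, but the body is a sketch, not the kernel code:

/* Hypothetical user-space model of the nmi_running handshake. */
#include <stdatomic.h>
#include <stdbool.h>

static atomic_int  nmi_running;		/* NMIs currently executing */
static atomic_bool mod_code_write;	/* a text patch is pending   */

static void do_mod_code(void)
{
	/* stand-in for patching the mcount call site */
}

void ftrace_nmi_enter(void)
{
	atomic_fetch_add(&nmi_running, 1);
	/* nmi_running must be visible before the write flag is read */
	atomic_thread_fence(memory_order_seq_cst);
	if (atomic_load(&mod_code_write))
		do_mod_code();	/* the NMI performs the pending patch itself */
}

void ftrace_nmi_exit(void)
{
	/* finish any patching before dropping the count */
	atomic_thread_fence(memory_order_seq_cst);
	atomic_fetch_sub(&nmi_running, 1);
}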
diff --git a/include/linux/ftrace_irq.h b/include/linux/ftrace_irq.h
index 29de6779a963..dca7bf8cffe2 100644
--- a/include/linux/ftrace_irq.h
+++ b/include/linux/ftrace_irq.h
@@ -3,14 +3,6 @@
 
 
 #ifdef CONFIG_FTRACE_NMI_ENTER
-extern void arch_ftrace_nmi_enter(void);
-extern void arch_ftrace_nmi_exit(void);
-#else
-static inline void arch_ftrace_nmi_enter(void) { }
-static inline void arch_ftrace_nmi_exit(void) { }
-#endif
-
-#ifdef CONFIG_RING_BUFFER
 extern void ftrace_nmi_enter(void);
 extern void ftrace_nmi_exit(void);
 #else
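
With the arch_* indirection gone, an architecture that selects CONFIG_FTRACE_NMI_ENTER supplies ftrace_nmi_enter()/ftrace_nmi_exit() directly. For readers without the tree handy, the header plausibly collapses to the following; the #else body falls outside the hunk above, so the empty static-inline stubs are an assumption based on the usual kernel idiom for config-gated hooks:

#ifndef _LINUX_FTRACE_IRQ_H
#define _LINUX_FTRACE_IRQ_H

#ifdef CONFIG_FTRACE_NMI_ENTER
extern void ftrace_nmi_enter(void);
extern void ftrace_nmi_exit(void);
#else
/* assumed no-op stubs when no arch NMI hooks are configured */
static inline void ftrace_nmi_enter(void) { }
static inline void ftrace_nmi_exit(void) { }
#endif

#endif /* _LINUX_FTRACE_IRQ_H */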
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index a60a6a852f42..5ee344417cd5 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -8,6 +8,7 @@
 #include <linux/spinlock.h>
 #include <linux/debugfs.h>
 #include <linux/uaccess.h>
+#include <linux/hardirq.h>
 #include <linux/module.h>
 #include <linux/percpu.h>
 #include <linux/mutex.h>
@@ -20,35 +21,6 @@
 #include "trace.h"
 
 /*
- * Since the write to the buffer is still not fully lockless,
- * we must be careful with NMIs. The locks in the writers
- * are taken when a write crosses to a new page. The locks
- * protect against races with the readers (this will soon
- * be fixed with a lockless solution).
- *
- * Because we can not protect against NMIs, and we want to
- * keep traces reentrant, we need to manage what happens
- * when we are in an NMI.
- */
-static DEFINE_PER_CPU(int, rb_in_nmi);
-
-void ftrace_nmi_enter(void)
-{
-	__get_cpu_var(rb_in_nmi)++;
-	/* call arch specific handler too */
-	arch_ftrace_nmi_enter();
-}
-
-void ftrace_nmi_exit(void)
-{
-	arch_ftrace_nmi_exit();
-	__get_cpu_var(rb_in_nmi)--;
-	/* NMIs are not recursive */
-	WARN_ON_ONCE(__get_cpu_var(rb_in_nmi));
-}
-
-
-/*
  * A fast way to enable or disable all ring buffers is to
  * call tracing_on or tracing_off. Turning off the ring buffers
  * prevents all ring buffers from being recorded to.
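
The per-CPU rb_in_nmi counter deleted here duplicated bookkeeping the core kernel already does: nmi_enter()/nmi_exit() fold an NMI marker into preempt_count(), which is what the new <linux/hardirq.h> include is for. The generic test is, roughly (paraphrased from the hardirq.h of this era; the exact mask name is from memory, not from this diff):

#define in_nmi()	(preempt_count() & NMI_MASK)

This also lets the WARN_ON_ONCE() recursion check go away, since the generic NMI entry path does its own sanity checking.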
@@ -1027,12 +999,23 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
 
 	local_irq_save(flags);
 	/*
+	 * Since the write to the buffer is still not
+	 * fully lockless, we must be careful with NMIs.
+	 * The locks in the writers are taken when a write
+	 * crosses to a new page. The locks protect against
+	 * races with the readers (this will soon be fixed
+	 * with a lockless solution).
+	 *
+	 * Because we can not protect against NMIs, and we
+	 * want to keep traces reentrant, we need to manage
+	 * what happens when we are in an NMI.
+	 *
 	 * NMIs can happen after we take the lock.
 	 * If we are in an NMI, only take the lock
 	 * if it is not already taken. Otherwise
 	 * simply fail.
 	 */
-	if (unlikely(__get_cpu_var(rb_in_nmi))) {
+	if (unlikely(in_nmi())) {
 		if (!__raw_spin_trylock(&cpu_buffer->lock))
 			goto out_unlock;
 	} else
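
The comment block moves to the one place it matters: the slow path where a writer crosses a page boundary and must take cpu_buffer->lock. The pattern is lock normally, but only trylock from NMI context, because the NMI may have interrupted this CPU's own writer while it held the lock. A minimal sketch of that control flow, with a pthreads mutex standing in for the kernel's raw spinlock and a plain flag standing in for in_nmi() (all names illustrative):

#include <pthread.h>
#include <stdbool.h>

static pthread_mutex_t buffer_lock = PTHREAD_MUTEX_INITIALIZER;
static bool this_cpu_in_nmi;	/* stand-in for the kernel's in_nmi() */

static bool advance_to_next_page(void)
{
	if (this_cpu_in_nmi) {
		/*
		 * Blocking here could deadlock against the writer we
		 * interrupted, so try once and drop the event on failure.
		 */
		if (pthread_mutex_trylock(&buffer_lock) != 0)
			return false;	/* reservation fails, event is lost */
	} else {
		pthread_mutex_lock(&buffer_lock);
	}
	/* ... cross to the new page under the lock ... */
	pthread_mutex_unlock(&buffer_lock);
	return true;
}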