author     Steven Rostedt <srostedt@redhat.com>  2009-02-06 01:45:16 -0500
committer  Steven Rostedt <srostedt@redhat.com>  2009-02-07 20:03:33 -0500
commit     a81bd80a0b0a405dc0483e2c428332d69da2c79f
tree       8543662a0ad8199bde641c1fd4a42929d684ffaf /kernel/trace/ring_buffer.c
parent     9a5fd902273d01170fd033691bd70b142baa7309
ring-buffer: use generic version of in_nmi
Impact: clean up
Now that a generic in_nmi() is available, this patch removes the
special NMI-tracking code from the ring buffer and uses the generic
version instead.
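For reference, the generic helper lives in <linux/hardirq.h> (hence the
new include in the diff below): NMI state is tracked in the preempt
count by nmi_enter()/nmi_exit(), so no per-cpu bookkeeping is needed.
A minimal sketch of that definition, roughly as it appears in the
hardirq.h of this era:

	/* Sketch: nmi_enter() sets the NMI bits in the preempt count,
	 * so any context can test for NMI with a simple mask. */
	#define in_nmi()	(preempt_count() & NMI_MASK)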
With this change, I was also able to rename "arch_ftrace_nmi_enter"
back to "ftrace_nmi_enter" and remove that code from the ring buffer.
Signed-off-by: Steven Rostedt <srostedt@redhat.com>
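The rename works because the generic header can now provide the stubs
directly. A sketch of what <linux/ftrace_irq.h> looks like after this
series (an approximation, not the verbatim header):

	/* Architectures that hook NMIs (CONFIG_FTRACE_NMI_ENTER) supply
	 * real ftrace_nmi_enter()/ftrace_nmi_exit(); all others get
	 * empty inline stubs. */
	#ifdef CONFIG_FTRACE_NMI_ENTER
	extern void ftrace_nmi_enter(void);
	extern void ftrace_nmi_exit(void);
	#else
	static inline void ftrace_nmi_enter(void) { }
	static inline void ftrace_nmi_exit(void) { }
	#endif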
Diffstat (limited to 'kernel/trace/ring_buffer.c')
-rw-r--r--  kernel/trace/ring_buffer.c  43
1 file changed, 13 insertions(+), 30 deletions(-)
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index a60a6a852f42..5ee344417cd5 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -8,6 +8,7 @@
 #include <linux/spinlock.h>
 #include <linux/debugfs.h>
 #include <linux/uaccess.h>
+#include <linux/hardirq.h>
 #include <linux/module.h>
 #include <linux/percpu.h>
 #include <linux/mutex.h>
@@ -20,35 +21,6 @@
 #include "trace.h"
 
 /*
- * Since the write to the buffer is still not fully lockless,
- * we must be careful with NMIs. The locks in the writers
- * are taken when a write crosses to a new page. The locks
- * protect against races with the readers (this will soon
- * be fixed with a lockless solution).
- *
- * Because we can not protect against NMIs, and we want to
- * keep traces reentrant, we need to manage what happens
- * when we are in an NMI.
- */
-static DEFINE_PER_CPU(int, rb_in_nmi);
-
-void ftrace_nmi_enter(void)
-{
-	__get_cpu_var(rb_in_nmi)++;
-	/* call arch specific handler too */
-	arch_ftrace_nmi_enter();
-}
-
-void ftrace_nmi_exit(void)
-{
-	arch_ftrace_nmi_exit();
-	__get_cpu_var(rb_in_nmi)--;
-	/* NMIs are not recursive */
-	WARN_ON_ONCE(__get_cpu_var(rb_in_nmi));
-}
-
-
-/*
  * A fast way to enable or disable all ring buffers is to
  * call tracing_on or tracing_off. Turning off the ring buffers
  * prevents all ring buffers from being recorded to.
@@ -1027,12 +999,23 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
 
 	local_irq_save(flags);
 	/*
+	 * Since the write to the buffer is still not
+	 * fully lockless, we must be careful with NMIs.
+	 * The locks in the writers are taken when a write
+	 * crosses to a new page. The locks protect against
+	 * races with the readers (this will soon be fixed
+	 * with a lockless solution).
+	 *
+	 * Because we can not protect against NMIs, and we
+	 * want to keep traces reentrant, we need to manage
+	 * what happens when we are in an NMI.
+	 *
 	 * NMIs can happen after we take the lock.
 	 * If we are in an NMI, only take the lock
 	 * if it is not already taken. Otherwise
 	 * simply fail.
 	 */
-	if (unlikely(__get_cpu_var(rb_in_nmi))) {
+	if (unlikely(in_nmi())) {
 		if (!__raw_spin_trylock(&cpu_buffer->lock))
 			goto out_unlock;
 	} else
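
The comment in the hunk above captures the key constraint: an NMI can
fire while the interrupted context on the same CPU already holds
cpu_buffer->lock, so NMI context may only try-lock and must fail the
write if the lock is busy. A minimal userspace analogue of that
pattern (illustrative only; reserve_slot and buffer_lock are invented
names, not kernel code):

	#include <stdatomic.h>
	#include <stdbool.h>

	static atomic_flag buffer_lock = ATOMIC_FLAG_INIT;

	static bool reserve_slot(bool nmi_context)
	{
		if (nmi_context) {
			/* Try-lock only: the context we interrupted may
			 * already hold the lock, and spinning on it from
			 * "NMI" level would deadlock this CPU. */
			if (atomic_flag_test_and_set(&buffer_lock))
				return false;	/* busy: drop the event */
		} else {
			/* Outside NMI it is safe to spin until free. */
			while (atomic_flag_test_and_set(&buffer_lock))
				;
		}
		/* ... reserve space in the buffer ... */
		atomic_flag_clear(&buffer_lock);
		return true;
	}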