author     Ingo Molnar <mingo@elte.hu>    2009-02-09 04:35:12 -0500
committer  Ingo Molnar <mingo@elte.hu>    2009-02-09 04:35:12 -0500
commit     44b0635481437140b0e29d6023f05e805d5e7620 (patch)
tree       ff31986115075410d0479df307a6b9841976026c  /kernel/trace/ring_buffer.c
parent     4ad476e11f94fd3724c6e272d8220e99cd222b27 (diff)
parent     57794a9d48b63e34acbe63282628c9f029603308 (diff)
Merge branch 'tip/tracing/core/devel' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-2.6-trace into tracing/ftrace
Conflicts:
	kernel/trace/trace_hw_branches.c
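For ring_buffer.c, the merged work makes the writer's page-crossing lock NMI-aware: when the reserve path runs in NMI context it only tries to take cpu_buffer->lock and bails out if the lock is already held, and a new lock_taken flag lets the failure path skip the unlock when the lock was never acquired. A standalone sketch of the pattern follows the diff.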
Diffstat (limited to 'kernel/trace/ring_buffer.c')
-rw-r--r--  kernel/trace/ring_buffer.c | 31
1 file changed, 29 insertions(+), 2 deletions(-)
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index aee76b3eeed2..53ba3a6d16d0 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -4,9 +4,11 @@
  * Copyright (C) 2008 Steven Rostedt <srostedt@redhat.com>
  */
 #include <linux/ring_buffer.h>
+#include <linux/ftrace_irq.h>
 #include <linux/spinlock.h>
 #include <linux/debugfs.h>
 #include <linux/uaccess.h>
+#include <linux/hardirq.h>
 #include <linux/module.h>
 #include <linux/percpu.h>
 #include <linux/mutex.h>
@@ -982,6 +984,7 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
 	struct ring_buffer *buffer = cpu_buffer->buffer;
 	struct ring_buffer_event *event;
 	unsigned long flags;
+	bool lock_taken = false;
 
 	commit_page = cpu_buffer->commit_page;
 	/* we just need to protect against interrupts */
@@ -995,7 +998,30 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
 		struct buffer_page *next_page = tail_page;
 
 		local_irq_save(flags);
-		__raw_spin_lock(&cpu_buffer->lock);
+		/*
+		 * Since the write to the buffer is still not
+		 * fully lockless, we must be careful with NMIs.
+		 * The locks in the writers are taken when a write
+		 * crosses to a new page. The locks protect against
+		 * races with the readers (this will soon be fixed
+		 * with a lockless solution).
+		 *
+		 * Because we can not protect against NMIs, and we
+		 * want to keep traces reentrant, we need to manage
+		 * what happens when we are in an NMI.
+		 *
+		 * NMIs can happen after we take the lock.
+		 * If we are in an NMI, only take the lock
+		 * if it is not already taken. Otherwise
+		 * simply fail.
+		 */
+		if (unlikely(in_nmi())) {
+			if (!__raw_spin_trylock(&cpu_buffer->lock))
+				goto out_unlock;
+		} else
+			__raw_spin_lock(&cpu_buffer->lock);
+
+		lock_taken = true;
 
 		rb_inc_page(cpu_buffer, &next_page);
 
@@ -1097,7 +1123,8 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
 	if (tail <= BUF_PAGE_SIZE)
 		local_set(&tail_page->write, tail);
 
-	__raw_spin_unlock(&cpu_buffer->lock);
+	if (likely(lock_taken))
+		__raw_spin_unlock(&cpu_buffer->lock);
 	local_irq_restore(flags);
 	return NULL;
 }
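
The locking pattern introduced above can be illustrated outside the kernel. The code below is a minimal userspace sketch, not the kernel implementation: buffer_lock, in_nmi_context, try_lock(), take_lock(), drop_lock() and reserve_event() are hypothetical stand-ins (C11 atomics in place of the raw spinlock, a plain flag in place of in_nmi()). It shows the same three steps as the patch: trylock instead of lock when in NMI context, bail out if the lock is already held, and remember in lock_taken whether the unlock on the way out is needed.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Hypothetical userspace stand-ins for the kernel primitives in the patch. */
static atomic_flag buffer_lock = ATOMIC_FLAG_INIT;   /* plays cpu_buffer->lock */
static bool in_nmi_context;                          /* plays in_nmi()         */

static bool try_lock(void)  { return !atomic_flag_test_and_set(&buffer_lock); }
static void take_lock(void) { while (atomic_flag_test_and_set(&buffer_lock)) ; }
static void drop_lock(void) { atomic_flag_clear(&buffer_lock); }

/*
 * Mirrors the reserve path: in NMI context we may only *try* to take the
 * lock, because the NMI can fire while this CPU already holds it and
 * spinning on it would never make progress.  lock_taken records whether
 * the unlock on the way out is actually needed.
 */
static int reserve_event(void)
{
	bool lock_taken = false;

	if (in_nmi_context) {
		if (!try_lock())
			goto out;    /* lock busy: drop the event, stay reentrant */
	} else {
		take_lock();
	}
	lock_taken = true;

	/* ... move to the next page and claim space for the event ... */

out:
	if (lock_taken)
		drop_lock();
	return lock_taken ? 0 : -1;
}

int main(void)
{
	in_nmi_context = false;
	printf("normal context:     %d\n", reserve_event());   /* 0: lock taken and released */

	take_lock();             /* simulate an NMI landing while the lock is held */
	in_nmi_context = true;
	printf("NMI with lock held: %d\n", reserve_event());   /* -1: fails instead of deadlocking */
	drop_lock();
	return 0;
}

The trade-off is the one the patch comment spells out: a plain __raw_spin_lock() in NMI context could spin forever on a lock the interrupted code on the same CPU already holds, so the trylock-or-fail path keeps tracing reentrant at the cost of occasionally dropping an event.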