author    Steven Rostedt <srostedt@redhat.com>    2009-02-05 18:43:07 -0500
committer Steven Rostedt <srostedt@redhat.com>    2009-02-07 20:00:17 -0500
commit    78d904b46a72fcf15ea6a39672bbef92953876b5 (patch)
tree      69f56f7bbd1866491517d902bdf18ab973f0eb5f /arch/x86/kernel/ftrace.c
parent    1830b52d0de8c60c4f5dfbac134aa8f69d815801 (diff)
ring-buffer: add NMI protection for spinlocks
Impact: prevent deadlock in NMI

The ring buffers are not yet totally lockless with writing to the buffer. When a writer crosses a page, it grabs a per-cpu spinlock to protect against a reader. The spinlocks taken by a writer are not to protect against other writers, since a writer can only write to its own per-cpu buffer. The spinlocks protect against readers, which can touch any cpu buffer. The writers are made reentrant by having the spinlocks disable interrupts.

The problem arises when an NMI writes to the buffer and that write crosses a page boundary. If it grabs a spinlock, it can race with another writer (since disabling interrupts does not protect against NMIs) or with a reader on the same CPU. Luckily, most of the users are not reentrant and protect against this issue. But if a user of the ring buffer becomes reentrant (which the ring buffers do allow), and the NMI also writes to the ring buffer, then we risk a deadlock.

This patch moves the ftrace_nmi_enter called by nmi_enter() into the ring buffer code. It renames the ftrace_nmi_enter used by arch-specific code to arch_ftrace_nmi_enter and updates the Kconfig to handle it.

When an NMI occurs, it will set a per-cpu variable in the ring buffer code and clear it when the NMI exits. If a write to the ring buffer crosses a page boundary inside an NMI, a trylock is used on the spinlock instead. If the spinlock fails to be acquired, the entry is discarded.

This bug appeared in the ftrace work in the RT tree, where event tracing is reentrant. This workaround solved the deadlocks that appeared there.

Signed-off-by: Steven Rostedt <srostedt@redhat.com>
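The mechanism described above can be sketched in kernel-style C. This is a minimal illustration, not the patch itself: the per-cpu flag rb_in_nmi and the helper rb_grab_reader_lock() are hypothetical names chosen for the sketch, and the real generic-side changes live in kernel/trace/ring_buffer.c.

    #include <linux/percpu.h>
    #include <linux/spinlock.h>

    /* Hypothetical name: illustrates the per-cpu NMI flag, not the
     * exact symbol introduced by the patch. */
    static DEFINE_PER_CPU(int, rb_in_nmi);

    extern void arch_ftrace_nmi_enter(void);
    extern void arch_ftrace_nmi_exit(void);

    /* Called from nmi_enter()/nmi_exit(); now lives in ring buffer code. */
    void ftrace_nmi_enter(void)
    {
    	__get_cpu_var(rb_in_nmi)++;
    	arch_ftrace_nmi_enter();	/* arch-specific hook, see diff below */
    }

    void ftrace_nmi_exit(void)
    {
    	arch_ftrace_nmi_exit();
    	__get_cpu_var(rb_in_nmi)--;
    }

    /* Writer path when crossing a page boundary: inside an NMI only a
     * trylock is attempted; on failure the caller discards the entry
     * instead of deadlocking against a reader or interrupted writer. */
    static int rb_grab_reader_lock(raw_spinlock_t *lock)
    {
    	if (unlikely(__get_cpu_var(rb_in_nmi)))
    		return __raw_spin_trylock(lock);	/* 0 => drop the entry */
    	__raw_spin_lock(lock);
    	return 1;
    }

Note that the writer runs with interrupts disabled and pinned to its own per-cpu buffer, so reading the per-cpu flag without further synchronization is safe here.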
Diffstat (limited to 'arch/x86/kernel/ftrace.c')
-rw-r--r--    arch/x86/kernel/ftrace.c    8
1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
index 4d33224c055f..4c683587055b 100644
--- a/arch/x86/kernel/ftrace.c
+++ b/arch/x86/kernel/ftrace.c
@@ -113,7 +113,7 @@ static void ftrace_mod_code(void)
 			       MCOUNT_INSN_SIZE);
 }
 
-void ftrace_nmi_enter(void)
+void arch_ftrace_nmi_enter(void)
 {
 	atomic_inc(&in_nmi);
 	/* Must have in_nmi seen before reading write flag */
@@ -124,7 +124,7 @@ void ftrace_nmi_enter(void)
 	}
 }
 
-void ftrace_nmi_exit(void)
+void arch_ftrace_nmi_exit(void)
 {
 	/* Finish all executions before clearing in_nmi */
 	smp_wmb();
@@ -376,12 +376,12 @@ int ftrace_disable_ftrace_graph_caller(void)
  */
 static atomic_t in_nmi;
 
-void ftrace_nmi_enter(void)
+void arch_ftrace_nmi_enter(void)
 {
 	atomic_inc(&in_nmi);
 }
 
-void ftrace_nmi_exit(void)
+void arch_ftrace_nmi_exit(void)
 {
 	atomic_dec(&in_nmi);
 }
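For completeness, the arch hooks renamed in this diff would be declared behind a config symbol so that architectures without an NMI-safe ftrace implementation get empty stubs. A hedged sketch follows; the exact Kconfig symbol and header are assumptions based only on the commit message's note that the Kconfig was updated:

    /* Sketch only: the config symbol name is an assumption. */
    #ifdef CONFIG_HAVE_FTRACE_NMI_ENTER
    extern void arch_ftrace_nmi_enter(void);
    extern void arch_ftrace_nmi_exit(void);
    #else
    static inline void arch_ftrace_nmi_enter(void) { }
    static inline void arch_ftrace_nmi_exit(void) { }
    #endif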