aboutsummaryrefslogtreecommitdiffstats
path: root/arch/x86/kernel/ftrace.c
diff options
context:
space:
mode:
authorSteven Rostedt <srostedt@redhat.com>2009-02-05 22:30:07 -0500
committerSteven Rostedt <srostedt@redhat.com>2009-02-07 20:01:21 -0500
commit4e6ea1440c67de32d7c89aacf233472dfc3bce82 (patch)
treeaaa7e3982ec03083144c922078f707e4903aa577 /arch/x86/kernel/ftrace.c
parentd8b891a2db13c8ed296158d6f8c4e335896d0cef (diff)
ftrace, x86: rename in_nmi variable
Impact: clean up. The in_nmi variable in x86 arch ftrace.c is a misnomer. Andrew Morton pointed out that the in_nmi variable is incremented by all CPUs. It can be set when another CPU is running an NMI. Since this is actually intentional, the fix is to rename it to what it really is: "nmi_running" Signed-off-by: Steven Rostedt <srostedt@redhat.com>
Diffstat (limited to 'arch/x86/kernel/ftrace.c')
-rw-r--r--arch/x86/kernel/ftrace.c22
1 file changed, 11 insertions, 11 deletions
diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
index 4c683587055b..e3fad2ef622c 100644
--- a/arch/x86/kernel/ftrace.c
+++ b/arch/x86/kernel/ftrace.c
@@ -82,7 +82,7 @@ static unsigned char *ftrace_call_replace(unsigned long ip, unsigned long addr)
82 * are the same as what exists. 82 * are the same as what exists.
83 */ 83 */
84 84
85static atomic_t in_nmi = ATOMIC_INIT(0); 85static atomic_t nmi_running = ATOMIC_INIT(0);
86static int mod_code_status; /* holds return value of text write */ 86static int mod_code_status; /* holds return value of text write */
87static int mod_code_write; /* set when NMI should do the write */ 87static int mod_code_write; /* set when NMI should do the write */
88static void *mod_code_ip; /* holds the IP to write to */ 88static void *mod_code_ip; /* holds the IP to write to */
@@ -115,8 +115,8 @@ static void ftrace_mod_code(void)
115 115
116void arch_ftrace_nmi_enter(void) 116void arch_ftrace_nmi_enter(void)
117{ 117{
118 atomic_inc(&in_nmi); 118 atomic_inc(&nmi_running);
119 /* Must have in_nmi seen before reading write flag */ 119 /* Must have nmi_running seen before reading write flag */
120 smp_mb(); 120 smp_mb();
121 if (mod_code_write) { 121 if (mod_code_write) {
122 ftrace_mod_code(); 122 ftrace_mod_code();
@@ -126,19 +126,19 @@ void arch_ftrace_nmi_enter(void)
126 126
127void arch_ftrace_nmi_exit(void) 127void arch_ftrace_nmi_exit(void)
128{ 128{
129 /* Finish all executions before clearing in_nmi */ 129 /* Finish all executions before clearing nmi_running */
130 smp_wmb(); 130 smp_wmb();
131 atomic_dec(&in_nmi); 131 atomic_dec(&nmi_running);
132} 132}
133 133
134static void wait_for_nmi(void) 134static void wait_for_nmi(void)
135{ 135{
136 if (!atomic_read(&in_nmi)) 136 if (!atomic_read(&nmi_running))
137 return; 137 return;
138 138
139 do { 139 do {
140 cpu_relax(); 140 cpu_relax();
141 } while(atomic_read(&in_nmi)); 141 } while (atomic_read(&nmi_running));
142 142
143 nmi_wait_count++; 143 nmi_wait_count++;
144} 144}
@@ -374,16 +374,16 @@ int ftrace_disable_ftrace_graph_caller(void)
374 * this page for dynamic ftrace. They have been 374 * this page for dynamic ftrace. They have been
375 * simplified to ignore all traces in NMI context. 375 * simplified to ignore all traces in NMI context.
376 */ 376 */
377static atomic_t in_nmi; 377static atomic_t nmi_running;
378 378
379void arch_ftrace_nmi_enter(void) 379void arch_ftrace_nmi_enter(void)
380{ 380{
381 atomic_inc(&in_nmi); 381 atomic_inc(&nmi_running);
382} 382}
383 383
384void arch_ftrace_nmi_exit(void) 384void arch_ftrace_nmi_exit(void)
385{ 385{
386 atomic_dec(&in_nmi); 386 atomic_dec(&nmi_running);
387} 387}
388 388
389#endif /* !CONFIG_DYNAMIC_FTRACE */ 389#endif /* !CONFIG_DYNAMIC_FTRACE */
@@ -475,7 +475,7 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr)
475 &return_to_handler; 475 &return_to_handler;
476 476
477 /* Nmi's are currently unsupported */ 477 /* Nmi's are currently unsupported */
478 if (unlikely(atomic_read(&in_nmi))) 478 if (unlikely(atomic_read(&nmi_running)))
479 return; 479 return;
480 480
481 if (unlikely(atomic_read(&current->tracing_graph_pause))) 481 if (unlikely(atomic_read(&current->tracing_graph_pause)))