about summary refs log tree commit diff stats
diff options
context:
space:
mode:
-rw-r--r--  arch/x86/kernel/ftrace.c   5
-rw-r--r--  include/linux/ftrace.h    13
-rw-r--r--  include/linux/sched.h      2
-rw-r--r--  kernel/trace/ftrace.c      2
-rw-r--r--  kernel/trace/trace.c      18
5 files changed, 26 insertions(+), 14 deletions(-)
diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
index f98c4076a170..1b43086b097a 100644
--- a/arch/x86/kernel/ftrace.c
+++ b/arch/x86/kernel/ftrace.c
@@ -476,7 +476,10 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr)
 		&return_to_handler;
 
 	/* Nmi's are currently unsupported */
-	if (atomic_read(&in_nmi))
+	if (unlikely(atomic_read(&in_nmi)))
+		return;
+
+	if (unlikely(atomic_read(&current->tracing_graph_pause)))
 		return;
 
 	/*
diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h
index 449fa8e9e34f..11cac81eed08 100644
--- a/include/linux/ftrace.h
+++ b/include/linux/ftrace.h
@@ -401,6 +401,16 @@ static inline int task_curr_ret_stack(struct task_struct *t)
 {
 	return t->curr_ret_stack;
 }
+
+static inline void pause_graph_tracing(void)
+{
+	atomic_inc(&current->tracing_graph_pause);
+}
+
+static inline void unpause_graph_tracing(void)
+{
+	atomic_dec(&current->tracing_graph_pause);
+}
 #else
 
 #define __notrace_funcgraph
@@ -412,6 +422,9 @@ static inline int task_curr_ret_stack(struct task_struct *tsk)
 {
 	return -1;
 }
+
+static inline void pause_graph_tracing(void) { }
+static inline void unpause_graph_tracing(void) { }
 #endif
 
 #ifdef CONFIG_TRACING
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 4c152e0acc9e..4b81fc5f7731 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1379,6 +1379,8 @@ struct task_struct {
 	 * because of depth overrun.
 	 */
 	atomic_t trace_overrun;
+	/* Pause for the tracing */
+	atomic_t tracing_graph_pause;
 #endif
 #ifdef CONFIG_TRACING
 	/* state flags for use by tracers */
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 2971fe48f55e..a12f80efceaa 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -1998,6 +1998,7 @@ static int alloc_retstack_tasklist(struct ftrace_ret_stack **ret_stack_list)
 			/* Make sure IRQs see the -1 first: */
 			barrier();
 			t->ret_stack = ret_stack_list[start++];
+			atomic_set(&t->tracing_graph_pause, 0);
 			atomic_set(&t->trace_overrun, 0);
 		}
 	} while_each_thread(g, t);
@@ -2077,6 +2078,7 @@ void ftrace_graph_init_task(struct task_struct *t)
 		if (!t->ret_stack)
 			return;
 		t->curr_ret_stack = -1;
+		atomic_set(&t->tracing_graph_pause, 0);
 		atomic_set(&t->trace_overrun, 0);
 	} else
 		t->ret_stack = NULL;
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 33549537f30f..0b8659bd5ad2 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -3590,14 +3590,7 @@ static __init int tracer_init_debugfs(void)
 
 int trace_vprintk(unsigned long ip, int depth, const char *fmt, va_list args)
 {
-	/*
-	 * Raw Spinlock because a normal spinlock would be traced here
-	 * and append an irrelevant couple spin_lock_irqsave/
-	 * spin_unlock_irqrestore traced by ftrace around this
-	 * TRACE_PRINTK trace.
-	 */
-	static raw_spinlock_t trace_buf_lock =
-		(raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
+	static DEFINE_SPINLOCK(trace_buf_lock);
 	static char trace_buf[TRACE_BUF_SIZE];
 
 	struct ring_buffer_event *event;
@@ -3618,8 +3611,8 @@ int trace_vprintk(unsigned long ip, int depth, const char *fmt, va_list args)
 	if (unlikely(atomic_read(&data->disabled)))
 		goto out;
 
-	local_irq_save(flags);
-	__raw_spin_lock(&trace_buf_lock);
+	pause_graph_tracing();
+	spin_lock_irqsave(&trace_buf_lock, irq_flags);
 	len = vsnprintf(trace_buf, TRACE_BUF_SIZE, fmt, args);
 
 	len = min(len, TRACE_BUF_SIZE-1);
@@ -3640,9 +3633,8 @@ int trace_vprintk(unsigned long ip, int depth, const char *fmt, va_list args)
 	ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
 
  out_unlock:
-	__raw_spin_unlock(&trace_buf_lock);
-	local_irq_restore(flags);
-
+	spin_unlock_irqrestore(&trace_buf_lock, irq_flags);
+	unpause_graph_tracing();
 out:
 	preempt_enable_notrace();
 