aboutsummaryrefslogtreecommitdiffstats
path: root/kernel/trace/trace.c
diff options
context:
space:
mode:
authorFrederic Weisbecker <fweisbec@gmail.com>2008-12-05 21:43:41 -0500
committerIngo Molnar <mingo@elte.hu>2008-12-08 09:11:45 -0500
commit380c4b1411ccd6885f92b2c8ceb08433a720f44e (patch)
treef3b571e1caff2de3e6b4cace87c2b1ec332667d7 /kernel/trace/trace.c
parent8e1b82e0866befaa0b2920be296c6e4c3fc7f422 (diff)
tracing/function-graph-tracer: append the tracing_graph_flag
Impact: Provide a way to pause the function graph tracer As suggested by Steven Rostedt, the previous patch that prevented from spinlock function tracing shouldn't use the raw_spinlock to fix it. It's much better to follow lockdep with normal spinlock, so this patch adds a new flag for each task to make the function graph tracer able to be paused. We also can send an ftrace_printk without worrying about the irrelevant traced spinlock during insertion. Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com> Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel/trace/trace.c')
-rw-r--r--kernel/trace/trace.c18
1 file changed, 5 insertions, 13 deletions
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 33549537f30f..0b8659bd5ad2 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -3590,14 +3590,7 @@ static __init int tracer_init_debugfs(void)
3590 3590
3591int trace_vprintk(unsigned long ip, int depth, const char *fmt, va_list args) 3591int trace_vprintk(unsigned long ip, int depth, const char *fmt, va_list args)
3592{ 3592{
3593 /* 3593 static DEFINE_SPINLOCK(trace_buf_lock);
3594 * Raw Spinlock because a normal spinlock would be traced here
3595 * and append an irrelevant couple spin_lock_irqsave/
3596 * spin_unlock_irqrestore traced by ftrace around this
3597 * TRACE_PRINTK trace.
3598 */
3599 static raw_spinlock_t trace_buf_lock =
3600 (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
3601 static char trace_buf[TRACE_BUF_SIZE]; 3594 static char trace_buf[TRACE_BUF_SIZE];
3602 3595
3603 struct ring_buffer_event *event; 3596 struct ring_buffer_event *event;
@@ -3618,8 +3611,8 @@ int trace_vprintk(unsigned long ip, int depth, const char *fmt, va_list args)
3618 if (unlikely(atomic_read(&data->disabled))) 3611 if (unlikely(atomic_read(&data->disabled)))
3619 goto out; 3612 goto out;
3620 3613
3621 local_irq_save(flags); 3614 pause_graph_tracing();
3622 __raw_spin_lock(&trace_buf_lock); 3615 spin_lock_irqsave(&trace_buf_lock, irq_flags);
3623 len = vsnprintf(trace_buf, TRACE_BUF_SIZE, fmt, args); 3616 len = vsnprintf(trace_buf, TRACE_BUF_SIZE, fmt, args);
3624 3617
3625 len = min(len, TRACE_BUF_SIZE-1); 3618 len = min(len, TRACE_BUF_SIZE-1);
@@ -3640,9 +3633,8 @@ int trace_vprintk(unsigned long ip, int depth, const char *fmt, va_list args)
3640 ring_buffer_unlock_commit(tr->buffer, event, irq_flags); 3633 ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
3641 3634
3642 out_unlock: 3635 out_unlock:
3643 __raw_spin_unlock(&trace_buf_lock); 3636 spin_unlock_irqrestore(&trace_buf_lock, irq_flags);
3644 local_irq_restore(flags); 3637 unpause_graph_tracing();
3645
3646 out: 3638 out:
3647 preempt_enable_notrace(); 3639 preempt_enable_notrace();
3648 3640