diff options
author | Frederic Weisbecker <fweisbec@gmail.com> | 2008-12-05 21:43:41 -0500 |
---|---|---|
committer | Ingo Molnar <mingo@elte.hu> | 2008-12-08 09:11:45 -0500 |
commit | 380c4b1411ccd6885f92b2c8ceb08433a720f44e (patch) | |
tree | f3b571e1caff2de3e6b4cace87c2b1ec332667d7 /kernel | |
parent | 8e1b82e0866befaa0b2920be296c6e4c3fc7f422 (diff) |
tracing/function-graph-tracer: append the tracing_graph_flag
Impact: Provide a way to pause the function graph tracer
As suggested by Steven Rostedt, the previous patch — which prevented the
spinlock functions from being traced — shouldn't have used a raw_spinlock to fix it.
It's much better to follow lockdep with normal spinlock, so this patch
adds a new flag for each task to make the function graph tracer able
to be paused. We can also send an ftrace_printk without worrying about
the irrelevant traced spinlock during insertion.
Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel')
-rw-r--r-- | kernel/trace/ftrace.c | 2 | ||||
-rw-r--r-- | kernel/trace/trace.c | 18 |
2 files changed, 7 insertions, 13 deletions
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c index 2971fe48f55e..a12f80efceaa 100644 --- a/kernel/trace/ftrace.c +++ b/kernel/trace/ftrace.c | |||
@@ -1998,6 +1998,7 @@ static int alloc_retstack_tasklist(struct ftrace_ret_stack **ret_stack_list) | |||
1998 | /* Make sure IRQs see the -1 first: */ | 1998 | /* Make sure IRQs see the -1 first: */ |
1999 | barrier(); | 1999 | barrier(); |
2000 | t->ret_stack = ret_stack_list[start++]; | 2000 | t->ret_stack = ret_stack_list[start++]; |
2001 | atomic_set(&t->tracing_graph_pause, 0); | ||
2001 | atomic_set(&t->trace_overrun, 0); | 2002 | atomic_set(&t->trace_overrun, 0); |
2002 | } | 2003 | } |
2003 | } while_each_thread(g, t); | 2004 | } while_each_thread(g, t); |
@@ -2077,6 +2078,7 @@ void ftrace_graph_init_task(struct task_struct *t) | |||
2077 | if (!t->ret_stack) | 2078 | if (!t->ret_stack) |
2078 | return; | 2079 | return; |
2079 | t->curr_ret_stack = -1; | 2080 | t->curr_ret_stack = -1; |
2081 | atomic_set(&t->tracing_graph_pause, 0); | ||
2080 | atomic_set(&t->trace_overrun, 0); | 2082 | atomic_set(&t->trace_overrun, 0); |
2081 | } else | 2083 | } else |
2082 | t->ret_stack = NULL; | 2084 | t->ret_stack = NULL; |
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index 33549537f30f..0b8659bd5ad2 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c | |||
@@ -3590,14 +3590,7 @@ static __init int tracer_init_debugfs(void) | |||
3590 | 3590 | ||
3591 | int trace_vprintk(unsigned long ip, int depth, const char *fmt, va_list args) | 3591 | int trace_vprintk(unsigned long ip, int depth, const char *fmt, va_list args) |
3592 | { | 3592 | { |
3593 | /* | 3593 | static DEFINE_SPINLOCK(trace_buf_lock); |
3594 | * Raw Spinlock because a normal spinlock would be traced here | ||
3595 | * and append an irrelevant couple spin_lock_irqsave/ | ||
3596 | * spin_unlock_irqrestore traced by ftrace around this | ||
3597 | * TRACE_PRINTK trace. | ||
3598 | */ | ||
3599 | static raw_spinlock_t trace_buf_lock = | ||
3600 | (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED; | ||
3601 | static char trace_buf[TRACE_BUF_SIZE]; | 3594 | static char trace_buf[TRACE_BUF_SIZE]; |
3602 | 3595 | ||
3603 | struct ring_buffer_event *event; | 3596 | struct ring_buffer_event *event; |
@@ -3618,8 +3611,8 @@ int trace_vprintk(unsigned long ip, int depth, const char *fmt, va_list args) | |||
3618 | if (unlikely(atomic_read(&data->disabled))) | 3611 | if (unlikely(atomic_read(&data->disabled))) |
3619 | goto out; | 3612 | goto out; |
3620 | 3613 | ||
3621 | local_irq_save(flags); | 3614 | pause_graph_tracing(); |
3622 | __raw_spin_lock(&trace_buf_lock); | 3615 | spin_lock_irqsave(&trace_buf_lock, irq_flags); |
3623 | len = vsnprintf(trace_buf, TRACE_BUF_SIZE, fmt, args); | 3616 | len = vsnprintf(trace_buf, TRACE_BUF_SIZE, fmt, args); |
3624 | 3617 | ||
3625 | len = min(len, TRACE_BUF_SIZE-1); | 3618 | len = min(len, TRACE_BUF_SIZE-1); |
@@ -3640,9 +3633,8 @@ int trace_vprintk(unsigned long ip, int depth, const char *fmt, va_list args) | |||
3640 | ring_buffer_unlock_commit(tr->buffer, event, irq_flags); | 3633 | ring_buffer_unlock_commit(tr->buffer, event, irq_flags); |
3641 | 3634 | ||
3642 | out_unlock: | 3635 | out_unlock: |
3643 | __raw_spin_unlock(&trace_buf_lock); | 3636 | spin_unlock_irqrestore(&trace_buf_lock, irq_flags); |
3644 | local_irq_restore(flags); | 3637 | unpause_graph_tracing(); |
3645 | |||
3646 | out: | 3638 | out: |
3647 | preempt_enable_notrace(); | 3639 | preempt_enable_notrace(); |
3648 | 3640 | ||