author		Steven Rostedt <rostedt@goodmis.org>	2008-05-22 00:22:19 -0400
committer	Thomas Gleixner <tglx@linutronix.de>	2008-05-26 16:51:22 -0400
commit		7e18d8e701b6798a5df11e0a16881a60ab1018b6 (patch)
tree		788be0413af1f39fb7b493089dd5a497251279f6 /kernel/trace/trace_sched_wakeup.c
parent		4902f8849da6d2805bd291551a6dfd48f1b4f604 (diff)
ftrace: add function tracing to wake up tracing
This patch adds function tracing to the functions that are called
on the CPU of the task being traced.
Signed-off-by: Steven Rostedt <srostedt@redhat.com>
Cc: pq@iki.fi
Cc: proski@gnu.org
Cc: sandmann@redhat.com
Cc: a.p.zijlstra@chello.nl
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
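The mechanism the patch builds on is the ftrace function-entry hook: a tracer
fills in a struct ftrace_ops with its callback and turns tracing on and off
with register_ftrace_function()/unregister_ftrace_function(). A minimal sketch
of that shape, assuming the 2008-era two-argument callback signature used in
the diff below (my_tracer_call and my_trace_ops are hypothetical names, not
part of this commit):

#include <linux/ftrace.h>

/* hypothetical callback: invoked at every traced function entry */
static void my_tracer_call(unsigned long ip, unsigned long parent_ip)
{
	/* keep this path cheap and recursion-safe; see wakeup_tracer_call() */
}

static struct ftrace_ops my_trace_ops __read_mostly =
{
	.func = my_tracer_call,
};

/* enable on tracer start, disable on stop, as the patch does: */
/*	register_ftrace_function(&my_trace_ops);	*/
/*	unregister_ftrace_function(&my_trace_ops);	*/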
Diffstat (limited to 'kernel/trace/trace_sched_wakeup.c')
-rw-r--r--	kernel/trace/trace_sched_wakeup.c	67
1 file changed, 66 insertions(+), 1 deletion(-)
diff --git a/kernel/trace/trace_sched_wakeup.c b/kernel/trace/trace_sched_wakeup.c
index 5d2fb48e47f8..bf7e91caef57 100644
--- a/kernel/trace/trace_sched_wakeup.c
+++ b/kernel/trace/trace_sched_wakeup.c
@@ -30,6 +30,69 @@ static DEFINE_SPINLOCK(wakeup_lock);
 
 static void __wakeup_reset(struct trace_array *tr);
 
+#ifdef CONFIG_FTRACE
+/*
+ * wakeup uses its own tracer function to keep the overhead down:
+ */
+static void
+wakeup_tracer_call(unsigned long ip, unsigned long parent_ip)
+{
+	struct trace_array *tr = wakeup_trace;
+	struct trace_array_cpu *data;
+	unsigned long flags;
+	long disabled;
+	int resched;
+	int cpu;
+
+	if (likely(!wakeup_task))
+		return;
+
+	resched = need_resched();
+	preempt_disable_notrace();
+
+	cpu = raw_smp_processor_id();
+	data = tr->data[cpu];
+	disabled = atomic_inc_return(&data->disabled);
+	if (unlikely(disabled != 1))
+		goto out;
+
+	spin_lock_irqsave(&wakeup_lock, flags);
+
+	if (unlikely(!wakeup_task))
+		goto unlock;
+
+	/*
+	 * The task can't disappear because it needs to
+	 * wake up first, and we have the wakeup_lock.
+	 */
+	if (task_cpu(wakeup_task) != cpu)
+		goto unlock;
+
+	trace_function(tr, data, ip, parent_ip, flags);
+
+ unlock:
+	spin_unlock_irqrestore(&wakeup_lock, flags);
+
+ out:
+	atomic_dec(&data->disabled);
+
+	/*
+	 * To prevent recursion from the scheduler, if the
+	 * resched flag was set before we entered, then
+	 * don't reschedule.
+	 */
+	if (resched)
+		preempt_enable_no_resched_notrace();
+	else
+		preempt_enable_notrace();
+}
+
+static struct ftrace_ops trace_ops __read_mostly =
+{
+	.func = wakeup_tracer_call,
+};
+#endif /* CONFIG_FTRACE */
+
 /*
  * Should this new latency be reported/recorded?
  */
@@ -73,7 +136,7 @@ wakeup_sched_switch(void *private, void *rq, struct task_struct *prev,
 	if (next != wakeup_task)
 		return;
 
-	/* The task we are waitng for is waking up */
+	/* The task we are waiting for is waking up */
 	data = tr->data[wakeup_cpu];
 
 	/* disable local data, not wakeup_cpu data */
@@ -290,6 +353,7 @@ static void start_wakeup_tracer(struct trace_array *tr)
 	smp_wmb();
 
 	tracer_enabled = 1;
+	register_ftrace_function(&trace_ops);
 
 	return;
 fail_deprobe_wake_new:
@@ -305,6 +369,7 @@ fail_deprobe:
 static void stop_wakeup_tracer(struct trace_array *tr)
 {
 	tracer_enabled = 0;
+	unregister_ftrace_function(&trace_ops);
 	marker_probe_unregister("kernel_sched_schedule",
 				sched_switch_callback,
 				&wakeup_trace);
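The new callback protects itself in two ways visible in the diff: a per-CPU
disabled counter so only the first nesting level on a CPU records anything,
and a notrace preempt pair that skips rescheduling when NEED_RESCHED was
already set on entry, so the callback never re-enters the scheduler it is
tracing. A distilled sketch of that guard, assuming the same 2008-era API
(sketch_disabled is a hypothetical stand-in for the per-CPU
tr->data[cpu]->disabled counter):

#include <linux/ftrace.h>
#include <linux/preempt.h>
#include <linux/sched.h>

static atomic_t sketch_disabled;	/* stand-in for tr->data[cpu]->disabled */

static void sketch_guarded_call(unsigned long ip, unsigned long parent_ip)
{
	int resched = need_resched();	/* sampled before preemption goes off */

	preempt_disable_notrace();	/* the disable itself is not traced */

	if (atomic_inc_return(&sketch_disabled) == 1) {
		/* first nesting level on this CPU: safe to record a trace */
	}
	atomic_dec(&sketch_disabled);

	/*
	 * If NEED_RESCHED was set before we entered, re-enable without
	 * rescheduling; triggering a reschedule from trace context would
	 * recurse into the scheduler.
	 */
	if (resched)
		preempt_enable_no_resched_notrace();
	else
		preempt_enable_notrace();
}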