author		Mathieu Desnoyers <mathieu.desnoyers@polymtl.ca>	2008-05-12 15:21:10 -0400
committer	Thomas Gleixner <tglx@linutronix.de>		2008-05-23 16:29:25 -0400
commit		5b82a1b08a00b2adca3d9dd9777efff40b7aaaa1 (patch)
tree		4dcce4af592ca177bee3dfeb34f9b482d142e713 /kernel/trace/trace_sched_switch.c
parent		0aa977f592f17004f9d1d545f2e1bb9ea71896c3 (diff)
Port ftrace to markers
Port ftrace to the marker infrastructure.

The sched tracer no longer needs to chain to the wakeup tracer, because
markers support multiple probes connected to the same marker.
Signed-off-by: Mathieu Desnoyers <mathieu.desnoyers@polymtl.ca>
CC: Steven Rostedt <rostedt@goodmis.org>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
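
For context on the "multiple probes" point: the markers API of this era lets any number of probes attach to one marker, so each tracer can register independently. A minimal module-style sketch, not part of this patch (probe bodies and names are placeholders):

/*
 * Sketch only: two independent probes on the same marker, which is what
 * makes explicit chaining from the sched tracer to the wakeup tracer
 * unnecessary.  Uses the markers API of this era (linux/marker.h).
 */
#include <linux/marker.h>
#include <linux/module.h>

static notrace void probe_sched(void *probe_data, void *call_data,
				const char *format, va_list *args)
{
	/* first consumer, e.g. the sched_switch tracer */
}

static notrace void probe_wakeup(void *probe_data, void *call_data,
				 const char *format, va_list *args)
{
	/* second consumer, e.g. the wakeup latency tracer */
}

static int __init demo_init(void)
{
	int ret;

	ret = marker_probe_register("kernel_sched_schedule",
			"prev_pid %d next_pid %d prev_state %ld "
			"## rq %p prev %p next %p",
			probe_sched, NULL);
	if (ret)
		return ret;

	ret = marker_probe_register("kernel_sched_schedule",
			"prev_pid %d next_pid %d prev_state %ld "
			"## rq %p prev %p next %p",
			probe_wakeup, NULL);
	if (ret)
		marker_probe_unregister("kernel_sched_schedule",
					probe_sched, NULL);
	return ret;
}

static void __exit demo_exit(void)
{
	marker_probe_unregister("kernel_sched_schedule",
				probe_wakeup, NULL);
	marker_probe_unregister("kernel_sched_schedule",
				probe_sched, NULL);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");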
Diffstat (limited to 'kernel/trace/trace_sched_switch.c')
-rw-r--r--	kernel/trace/trace_sched_switch.c	171
1 file changed, 143 insertions(+), 28 deletions(-)
diff --git a/kernel/trace/trace_sched_switch.c b/kernel/trace/trace_sched_switch.c
index a3376478fc2c..d25ffa5eaf2b 100644
--- a/kernel/trace/trace_sched_switch.c
+++ b/kernel/trace/trace_sched_switch.c
@@ -16,11 +16,14 @@
 
 static struct trace_array *ctx_trace;
 static int __read_mostly tracer_enabled;
+static atomic_t sched_ref;
 
 static void
-ctx_switch_func(void *__rq, struct task_struct *prev, struct task_struct *next)
+sched_switch_func(void *private, void *__rq, struct task_struct *prev,
+		  struct task_struct *next)
 {
-	struct trace_array *tr = ctx_trace;
+	struct trace_array **ptr = private;
+	struct trace_array *tr = *ptr;
 	struct trace_array_cpu *data;
 	unsigned long flags;
 	long disabled;
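
Note the new indirection: registration will pass &ctx_trace as probe private data, so the probe dereferences a struct trace_array ** at call time and always sees the current value of ctx_trace. A tiny userspace sketch of the idea (illustrative names, plain C):

#include <stdio.h>

static int ctx_trace;		/* stand-in for the global trace_array pointer */

static void probe(void *private)
{
	int *ptr = private;	/* mirrors: struct trace_array **ptr = private; */

	printf("probe sees ctx_trace = %d\n", *ptr);
}

int main(void)
{
	void *private = &ctx_trace;	/* handed over at registration time */

	ctx_trace = 42;			/* tracer wired up later */
	probe(private);			/* still observes the fresh value */
	return 0;
}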
@@ -41,10 +44,40 @@ ctx_switch_func(void *__rq, struct task_struct *prev, struct task_struct *next)
 	local_irq_restore(flags);
 }
 
+static notrace void
+sched_switch_callback(void *probe_data, void *call_data,
+		      const char *format, va_list *args)
+{
+	struct task_struct *prev;
+	struct task_struct *next;
+	struct rq *__rq;
+
+	if (!atomic_read(&sched_ref))
+		return;
+
+	/* skip prev_pid %d next_pid %d prev_state %ld */
+	(void)va_arg(*args, int);
+	(void)va_arg(*args, int);
+	(void)va_arg(*args, long);
+	__rq = va_arg(*args, typeof(__rq));
+	prev = va_arg(*args, typeof(prev));
+	next = va_arg(*args, typeof(next));
+
+	tracing_record_cmdline(prev);
+
+	/*
+	 * If tracer_switch_func only points to the local
+	 * switch func, it still needs the ptr passed to it.
+	 */
+	sched_switch_func(probe_data, __rq, prev, next);
+}
+
 static void
-wakeup_func(void *__rq, struct task_struct *wakee, struct task_struct *curr)
+wakeup_func(void *private, void *__rq, struct task_struct *wakee, struct
+			task_struct *curr)
 {
-	struct trace_array *tr = ctx_trace;
+	struct trace_array **ptr = private;
+	struct trace_array *tr = *ptr;
 	struct trace_array_cpu *data;
 	unsigned long flags;
 	long disabled;
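
A marker callback must consume the va_list in exactly the order the marker's format string declares, discarding the leading scalar fields before pulling the pointer payload. A self-contained userspace analogue of this skip-then-extract pattern (hypothetical task struct, illustrative only):

#include <stdarg.h>
#include <stdio.h>

struct task { const char *comm; };

static void probe(const char *format, ...)
{
	va_list args;
	struct task *prev, *next;

	va_start(args, format);
	/* skip "prev_pid %d next_pid %d prev_state %ld" */
	(void)va_arg(args, int);
	(void)va_arg(args, int);
	(void)va_arg(args, long);
	/* then the pointer payload: "rq %p prev %p next %p" */
	(void)va_arg(args, void *);	/* rq, unused here */
	prev = va_arg(args, struct task *);
	next = va_arg(args, struct task *);
	va_end(args);

	printf("switch %s -> %s\n", prev->comm, next->comm);
}

int main(void)
{
	struct task a = { "swapper" }, b = { "bash" };

	probe("prev_pid %d next_pid %d prev_state %ld rq %p prev %p next %p",
	      0, 42, 0L, (void *)0, &a, &b);
	return 0;
}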
@@ -67,35 +100,29 @@ wakeup_func(void *__rq, struct task_struct *wakee, struct task_struct *curr)
 	local_irq_restore(flags);
 }
 
-void
-ftrace_ctx_switch(void *__rq, struct task_struct *prev,
-		  struct task_struct *next)
+static notrace void
+wake_up_callback(void *probe_data, void *call_data,
+		 const char *format, va_list *args)
 {
-	if (unlikely(atomic_read(&trace_record_cmdline_enabled)))
-		tracing_record_cmdline(prev);
+	struct task_struct *curr;
+	struct task_struct *task;
+	struct rq *__rq;
 
-	/*
-	 * If tracer_switch_func only points to the local
-	 * switch func, it still needs the ptr passed to it.
-	 */
-	ctx_switch_func(__rq, prev, next);
+	if (likely(!tracer_enabled))
+		return;
 
-	/*
-	 * Chain to the wakeup tracer (this is a NOP if disabled):
-	 */
-	wakeup_sched_switch(prev, next);
-}
+	/* Skip pid %d state %ld */
+	(void)va_arg(*args, int);
+	(void)va_arg(*args, long);
+	/* now get the meat: "rq %p task %p rq->curr %p" */
+	__rq = va_arg(*args, typeof(__rq));
+	task = va_arg(*args, typeof(task));
+	curr = va_arg(*args, typeof(curr));
 
-void
-ftrace_wake_up_task(void *__rq, struct task_struct *wakee,
-		    struct task_struct *curr)
-{
-	wakeup_func(__rq, wakee, curr);
+	tracing_record_cmdline(task);
+	tracing_record_cmdline(curr);
 
-	/*
-	 * Chain to the wakeup tracer (this is a NOP if disabled):
-	 */
-	wakeup_sched_wakeup(wakee, curr);
+	wakeup_func(probe_data, __rq, task, curr);
 }
 
 void
@@ -132,15 +159,95 @@ static void sched_switch_reset(struct trace_array *tr)
 		tracing_reset(tr->data[cpu]);
 }
 
+static int tracing_sched_register(void)
+{
+	int ret;
+
+	ret = marker_probe_register("kernel_sched_wakeup",
+			"pid %d state %ld ## rq %p task %p rq->curr %p",
+			wake_up_callback,
+			&ctx_trace);
+	if (ret) {
+		pr_info("wakeup trace: Couldn't add marker"
+			" probe to kernel_sched_wakeup\n");
+		return ret;
+	}
+
+	ret = marker_probe_register("kernel_sched_wakeup_new",
+			"pid %d state %ld ## rq %p task %p rq->curr %p",
+			wake_up_callback,
+			&ctx_trace);
+	if (ret) {
+		pr_info("wakeup trace: Couldn't add marker"
+			" probe to kernel_sched_wakeup_new\n");
+		goto fail_deprobe;
+	}
+
+	ret = marker_probe_register("kernel_sched_schedule",
+			"prev_pid %d next_pid %d prev_state %ld "
+			"## rq %p prev %p next %p",
+			sched_switch_callback,
+			&ctx_trace);
+	if (ret) {
+		pr_info("sched trace: Couldn't add marker"
+			" probe to kernel_sched_schedule\n");
+		goto fail_deprobe_wake_new;
+	}
+
+	return ret;
+fail_deprobe_wake_new:
+	marker_probe_unregister("kernel_sched_wakeup_new",
+				wake_up_callback,
+				&ctx_trace);
+fail_deprobe:
+	marker_probe_unregister("kernel_sched_wakeup",
+				wake_up_callback,
+				&ctx_trace);
+	return ret;
+}
+
+static void tracing_sched_unregister(void)
+{
+	marker_probe_unregister("kernel_sched_schedule",
+				sched_switch_callback,
+				&ctx_trace);
+	marker_probe_unregister("kernel_sched_wakeup_new",
+				wake_up_callback,
+				&ctx_trace);
+	marker_probe_unregister("kernel_sched_wakeup",
+				wake_up_callback,
+				&ctx_trace);
+}
+
+void tracing_start_sched_switch(void)
+{
+	long ref;
+
+	ref = atomic_inc_return(&sched_ref);
+	if (ref == 1)
+		tracing_sched_register();
+}
+
+void tracing_stop_sched_switch(void)
+{
+	long ref;
+
+	ref = atomic_dec_and_test(&sched_ref);
+	if (ref)
+		tracing_sched_unregister();
+}
+
 static void start_sched_trace(struct trace_array *tr)
 {
 	sched_switch_reset(tr);
 	atomic_inc(&trace_record_cmdline_enabled);
 	tracer_enabled = 1;
+	tracing_start_sched_switch();
 }
 
 static void stop_sched_trace(struct trace_array *tr)
 {
+	tracing_stop_sched_switch();
 	atomic_dec(&trace_record_cmdline_enabled);
 	tracer_enabled = 0;
 }
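
tracing_sched_register() above follows the usual kernel unwind idiom: each failing step jumps to a label that undoes only the steps that already succeeded, in reverse order. A compact runnable illustration of the shape (stub step/undo functions, not the patch's code):

#include <stdio.h>

static int step_a(void)  { return 0; }
static int step_b(void)  { return 0; }
static int step_c(void)  { return -1; }	/* force a failure to exercise unwind */
static void undo_a(void) { puts("undo a"); }
static void undo_b(void) { puts("undo b"); }

static int setup_three(void)
{
	int ret;

	ret = step_a();
	if (ret)
		return ret;
	ret = step_b();
	if (ret)
		goto fail_a;
	ret = step_c();
	if (ret)
		goto fail_b;
	return 0;
fail_b:
	undo_b();	/* undo only what succeeded, newest first */
fail_a:
	undo_a();
	return ret;
}

int main(void)
{
	printf("setup_three() = %d\n", setup_three());
	return 0;
}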
@@ -181,6 +288,14 @@ static struct tracer sched_switch_trace __read_mostly =
 
 __init static int init_sched_switch_trace(void)
 {
+	int ret = 0;
+
+	if (atomic_read(&sched_ref))
+		ret = tracing_sched_register();
+	if (ret) {
+		pr_info("error registering scheduler trace\n");
+		return ret;
+	}
 	return register_tracer(&sched_switch_trace);
 }
 device_initcall(init_sched_switch_trace);
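
One note on lifetime: sched_ref gives the probes first-user/last-user semantics; tracing_start_sched_switch() registers on the 0 -> 1 transition and tracing_stop_sched_switch() unregisters when atomic_dec_and_test() reports the count reached zero. A userspace sketch of the same pattern using GCC atomic builtins (purely illustrative):

#include <stdio.h>

static int sched_ref;

static void do_register(void)   { puts("register probes"); }
static void do_unregister(void) { puts("unregister probes"); }

static void tracing_start(void)
{
	/* first user brings the probes up (atomic_inc_return in the kernel) */
	if (__atomic_add_fetch(&sched_ref, 1, __ATOMIC_SEQ_CST) == 1)
		do_register();
}

static void tracing_stop(void)
{
	/* last user tears them down (atomic_dec_and_test in the kernel) */
	if (__atomic_sub_fetch(&sched_ref, 1, __ATOMIC_SEQ_CST) == 0)
		do_unregister();
}

int main(void)
{
	tracing_start();	/* registers */
	tracing_start();	/* no-op */
	tracing_stop();		/* no-op */
	tracing_stop();		/* unregisters */
	return 0;
}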