diff options
author | Mathieu Desnoyers <mathieu.desnoyers@polymtl.ca> | 2008-07-18 12:16:17 -0400 |
---|---|---|
committer | Ingo Molnar <mingo@elte.hu> | 2008-10-14 04:32:26 -0400 |
commit | b07c3f193a8074aa4afe43cfa8ae38ec4c7ccfa9 (patch) | |
tree | 7d2b1d9efc5a8e24cb07c8d7f0b3e056fec8f150 /kernel/trace/trace_sched_wakeup.c | |
parent | 0a16b6075843325dc402edf80c1662838b929aff (diff) |
ftrace: port to tracepoints
Porting the trace_mark() used by ftrace to tracepoints. (cleanup)
Changelog:
- Change error messages : marker -> tracepoint
[ mingo@elte.hu: conflict resolutions ]
Signed-off-by: Mathieu Desnoyers <mathieu.desnoyers@polymtl.ca>
Acked-by: 'Peter Zijlstra' <peterz@infradead.org>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel/trace/trace_sched_wakeup.c')
-rw-r--r-- | kernel/trace/trace_sched_wakeup.c | 135 |
1 files changed, 34 insertions, 101 deletions
diff --git a/kernel/trace/trace_sched_wakeup.c b/kernel/trace/trace_sched_wakeup.c index e303ccb62cdf..08206b4e29c4 100644 --- a/kernel/trace/trace_sched_wakeup.c +++ b/kernel/trace/trace_sched_wakeup.c | |||
@@ -15,7 +15,7 @@ | |||
15 | #include <linux/kallsyms.h> | 15 | #include <linux/kallsyms.h> |
16 | #include <linux/uaccess.h> | 16 | #include <linux/uaccess.h> |
17 | #include <linux/ftrace.h> | 17 | #include <linux/ftrace.h> |
18 | #include <linux/marker.h> | 18 | #include <trace/sched.h> |
19 | 19 | ||
20 | #include "trace.h" | 20 | #include "trace.h" |
21 | 21 | ||
@@ -112,18 +112,18 @@ static int report_latency(cycle_t delta) | |||
112 | } | 112 | } |
113 | 113 | ||
114 | static void notrace | 114 | static void notrace |
115 | wakeup_sched_switch(void *private, void *rq, struct task_struct *prev, | 115 | probe_wakeup_sched_switch(struct rq *rq, struct task_struct *prev, |
116 | struct task_struct *next) | 116 | struct task_struct *next) |
117 | { | 117 | { |
118 | unsigned long latency = 0, t0 = 0, t1 = 0; | 118 | unsigned long latency = 0, t0 = 0, t1 = 0; |
119 | struct trace_array **ptr = private; | ||
120 | struct trace_array *tr = *ptr; | ||
121 | struct trace_array_cpu *data; | 119 | struct trace_array_cpu *data; |
122 | cycle_t T0, T1, delta; | 120 | cycle_t T0, T1, delta; |
123 | unsigned long flags; | 121 | unsigned long flags; |
124 | long disabled; | 122 | long disabled; |
125 | int cpu; | 123 | int cpu; |
126 | 124 | ||
125 | tracing_record_cmdline(prev); | ||
126 | |||
127 | if (unlikely(!tracer_enabled)) | 127 | if (unlikely(!tracer_enabled)) |
128 | return; | 128 | return; |
129 | 129 | ||
@@ -140,11 +140,11 @@ wakeup_sched_switch(void *private, void *rq, struct task_struct *prev, | |||
140 | return; | 140 | return; |
141 | 141 | ||
142 | /* The task we are waiting for is waking up */ | 142 | /* The task we are waiting for is waking up */ |
143 | data = tr->data[wakeup_cpu]; | 143 | data = wakeup_trace->data[wakeup_cpu]; |
144 | 144 | ||
145 | /* disable local data, not wakeup_cpu data */ | 145 | /* disable local data, not wakeup_cpu data */ |
146 | cpu = raw_smp_processor_id(); | 146 | cpu = raw_smp_processor_id(); |
147 | disabled = atomic_inc_return(&tr->data[cpu]->disabled); | 147 | disabled = atomic_inc_return(&wakeup_trace->data[cpu]->disabled); |
148 | if (likely(disabled != 1)) | 148 | if (likely(disabled != 1)) |
149 | goto out; | 149 | goto out; |
150 | 150 | ||
@@ -155,7 +155,7 @@ wakeup_sched_switch(void *private, void *rq, struct task_struct *prev, | |||
155 | if (unlikely(!tracer_enabled || next != wakeup_task)) | 155 | if (unlikely(!tracer_enabled || next != wakeup_task)) |
156 | goto out_unlock; | 156 | goto out_unlock; |
157 | 157 | ||
158 | trace_function(tr, data, CALLER_ADDR1, CALLER_ADDR2, flags); | 158 | trace_function(wakeup_trace, data, CALLER_ADDR1, CALLER_ADDR2, flags); |
159 | 159 | ||
160 | /* | 160 | /* |
161 | * usecs conversion is slow so we try to delay the conversion | 161 | * usecs conversion is slow so we try to delay the conversion |
@@ -174,39 +174,14 @@ wakeup_sched_switch(void *private, void *rq, struct task_struct *prev, | |||
174 | t0 = nsecs_to_usecs(T0); | 174 | t0 = nsecs_to_usecs(T0); |
175 | t1 = nsecs_to_usecs(T1); | 175 | t1 = nsecs_to_usecs(T1); |
176 | 176 | ||
177 | update_max_tr(tr, wakeup_task, wakeup_cpu); | 177 | update_max_tr(wakeup_trace, wakeup_task, wakeup_cpu); |
178 | 178 | ||
179 | out_unlock: | 179 | out_unlock: |
180 | __wakeup_reset(tr); | 180 | __wakeup_reset(wakeup_trace); |
181 | __raw_spin_unlock(&wakeup_lock); | 181 | __raw_spin_unlock(&wakeup_lock); |
182 | local_irq_restore(flags); | 182 | local_irq_restore(flags); |
183 | out: | 183 | out: |
184 | atomic_dec(&tr->data[cpu]->disabled); | 184 | atomic_dec(&wakeup_trace->data[cpu]->disabled); |
185 | } | ||
186 | |||
187 | static notrace void | ||
188 | sched_switch_callback(void *probe_data, void *call_data, | ||
189 | const char *format, va_list *args) | ||
190 | { | ||
191 | struct task_struct *prev; | ||
192 | struct task_struct *next; | ||
193 | struct rq *__rq; | ||
194 | |||
195 | /* skip prev_pid %d next_pid %d prev_state %ld */ | ||
196 | (void)va_arg(*args, int); | ||
197 | (void)va_arg(*args, int); | ||
198 | (void)va_arg(*args, long); | ||
199 | __rq = va_arg(*args, typeof(__rq)); | ||
200 | prev = va_arg(*args, typeof(prev)); | ||
201 | next = va_arg(*args, typeof(next)); | ||
202 | |||
203 | tracing_record_cmdline(prev); | ||
204 | |||
205 | /* | ||
206 | * If tracer_switch_func only points to the local | ||
207 | * switch func, it still needs the ptr passed to it. | ||
208 | */ | ||
209 | wakeup_sched_switch(probe_data, __rq, prev, next); | ||
210 | } | 185 | } |
211 | 186 | ||
212 | static void __wakeup_reset(struct trace_array *tr) | 187 | static void __wakeup_reset(struct trace_array *tr) |
@@ -240,19 +215,24 @@ static void wakeup_reset(struct trace_array *tr) | |||
240 | } | 215 | } |
241 | 216 | ||
242 | static void | 217 | static void |
243 | wakeup_check_start(struct trace_array *tr, struct task_struct *p, | 218 | probe_wakeup(struct rq *rq, struct task_struct *p) |
244 | struct task_struct *curr) | ||
245 | { | 219 | { |
246 | int cpu = smp_processor_id(); | 220 | int cpu = smp_processor_id(); |
247 | unsigned long flags; | 221 | unsigned long flags; |
248 | long disabled; | 222 | long disabled; |
249 | 223 | ||
224 | if (likely(!tracer_enabled)) | ||
225 | return; | ||
226 | |||
227 | tracing_record_cmdline(p); | ||
228 | tracing_record_cmdline(current); | ||
229 | |||
250 | if (likely(!rt_task(p)) || | 230 | if (likely(!rt_task(p)) || |
251 | p->prio >= wakeup_prio || | 231 | p->prio >= wakeup_prio || |
252 | p->prio >= curr->prio) | 232 | p->prio >= current->prio) |
253 | return; | 233 | return; |
254 | 234 | ||
255 | disabled = atomic_inc_return(&tr->data[cpu]->disabled); | 235 | disabled = atomic_inc_return(&wakeup_trace->data[cpu]->disabled); |
256 | if (unlikely(disabled != 1)) | 236 | if (unlikely(disabled != 1)) |
257 | goto out; | 237 | goto out; |
258 | 238 | ||
@@ -264,7 +244,7 @@ wakeup_check_start(struct trace_array *tr, struct task_struct *p, | |||
264 | goto out_locked; | 244 | goto out_locked; |
265 | 245 | ||
266 | /* reset the trace */ | 246 | /* reset the trace */ |
267 | __wakeup_reset(tr); | 247 | __wakeup_reset(wakeup_trace); |
268 | 248 | ||
269 | wakeup_cpu = task_cpu(p); | 249 | wakeup_cpu = task_cpu(p); |
270 | wakeup_prio = p->prio; | 250 | wakeup_prio = p->prio; |
@@ -274,74 +254,37 @@ wakeup_check_start(struct trace_array *tr, struct task_struct *p, | |||
274 | 254 | ||
275 | local_save_flags(flags); | 255 | local_save_flags(flags); |
276 | 256 | ||
277 | tr->data[wakeup_cpu]->preempt_timestamp = ftrace_now(cpu); | 257 | wakeup_trace->data[wakeup_cpu]->preempt_timestamp = ftrace_now(cpu); |
278 | trace_function(tr, tr->data[wakeup_cpu], | 258 | trace_function(wakeup_trace, wakeup_trace->data[wakeup_cpu], |
279 | CALLER_ADDR1, CALLER_ADDR2, flags); | 259 | CALLER_ADDR1, CALLER_ADDR2, flags); |
280 | 260 | ||
281 | out_locked: | 261 | out_locked: |
282 | __raw_spin_unlock(&wakeup_lock); | 262 | __raw_spin_unlock(&wakeup_lock); |
283 | out: | 263 | out: |
284 | atomic_dec(&tr->data[cpu]->disabled); | 264 | atomic_dec(&wakeup_trace->data[cpu]->disabled); |
285 | } | ||
286 | |||
287 | static notrace void | ||
288 | wake_up_callback(void *probe_data, void *call_data, | ||
289 | const char *format, va_list *args) | ||
290 | { | ||
291 | struct trace_array **ptr = probe_data; | ||
292 | struct trace_array *tr = *ptr; | ||
293 | struct task_struct *curr; | ||
294 | struct task_struct *task; | ||
295 | struct rq *__rq; | ||
296 | |||
297 | if (likely(!tracer_enabled)) | ||
298 | return; | ||
299 | |||
300 | /* Skip pid %d state %ld */ | ||
301 | (void)va_arg(*args, int); | ||
302 | (void)va_arg(*args, long); | ||
303 | /* now get the meat: "rq %p task %p rq->curr %p" */ | ||
304 | __rq = va_arg(*args, typeof(__rq)); | ||
305 | task = va_arg(*args, typeof(task)); | ||
306 | curr = va_arg(*args, typeof(curr)); | ||
307 | |||
308 | tracing_record_cmdline(task); | ||
309 | tracing_record_cmdline(curr); | ||
310 | |||
311 | wakeup_check_start(tr, task, curr); | ||
312 | } | 265 | } |
313 | 266 | ||
314 | static void start_wakeup_tracer(struct trace_array *tr) | 267 | static void start_wakeup_tracer(struct trace_array *tr) |
315 | { | 268 | { |
316 | int ret; | 269 | int ret; |
317 | 270 | ||
318 | ret = marker_probe_register("kernel_sched_wakeup", | 271 | ret = register_trace_sched_wakeup(probe_wakeup); |
319 | "pid %d state %ld ## rq %p task %p rq->curr %p", | ||
320 | wake_up_callback, | ||
321 | &wakeup_trace); | ||
322 | if (ret) { | 272 | if (ret) { |
323 | pr_info("wakeup trace: Couldn't add marker" | 273 | pr_info("wakeup trace: Couldn't activate tracepoint" |
324 | " probe to kernel_sched_wakeup\n"); | 274 | " probe to kernel_sched_wakeup\n"); |
325 | return; | 275 | return; |
326 | } | 276 | } |
327 | 277 | ||
328 | ret = marker_probe_register("kernel_sched_wakeup_new", | 278 | ret = register_trace_sched_wakeup_new(probe_wakeup); |
329 | "pid %d state %ld ## rq %p task %p rq->curr %p", | ||
330 | wake_up_callback, | ||
331 | &wakeup_trace); | ||
332 | if (ret) { | 279 | if (ret) { |
333 | pr_info("wakeup trace: Couldn't add marker" | 280 | pr_info("wakeup trace: Couldn't activate tracepoint" |
334 | " probe to kernel_sched_wakeup_new\n"); | 281 | " probe to kernel_sched_wakeup_new\n"); |
335 | goto fail_deprobe; | 282 | goto fail_deprobe; |
336 | } | 283 | } |
337 | 284 | ||
338 | ret = marker_probe_register("kernel_sched_schedule", | 285 | ret = register_trace_sched_switch(probe_wakeup_sched_switch); |
339 | "prev_pid %d next_pid %d prev_state %ld " | ||
340 | "## rq %p prev %p next %p", | ||
341 | sched_switch_callback, | ||
342 | &wakeup_trace); | ||
343 | if (ret) { | 286 | if (ret) { |
344 | pr_info("sched trace: Couldn't add marker" | 287 | pr_info("sched trace: Couldn't activate tracepoint" |
345 | " probe to kernel_sched_schedule\n"); | 288 | " probe to kernel_sched_schedule\n"); |
346 | goto fail_deprobe_wake_new; | 289 | goto fail_deprobe_wake_new; |
347 | } | 290 | } |
@@ -363,28 +306,18 @@ static void start_wakeup_tracer(struct trace_array *tr) | |||
363 | 306 | ||
364 | return; | 307 | return; |
365 | fail_deprobe_wake_new: | 308 | fail_deprobe_wake_new: |
366 | marker_probe_unregister("kernel_sched_wakeup_new", | 309 | unregister_trace_sched_wakeup_new(probe_wakeup); |
367 | wake_up_callback, | ||
368 | &wakeup_trace); | ||
369 | fail_deprobe: | 310 | fail_deprobe: |
370 | marker_probe_unregister("kernel_sched_wakeup", | 311 | unregister_trace_sched_wakeup(probe_wakeup); |
371 | wake_up_callback, | ||
372 | &wakeup_trace); | ||
373 | } | 312 | } |
374 | 313 | ||
375 | static void stop_wakeup_tracer(struct trace_array *tr) | 314 | static void stop_wakeup_tracer(struct trace_array *tr) |
376 | { | 315 | { |
377 | tracer_enabled = 0; | 316 | tracer_enabled = 0; |
378 | unregister_ftrace_function(&trace_ops); | 317 | unregister_ftrace_function(&trace_ops); |
379 | marker_probe_unregister("kernel_sched_schedule", | 318 | unregister_trace_sched_switch(probe_wakeup_sched_switch); |
380 | sched_switch_callback, | 319 | unregister_trace_sched_wakeup_new(probe_wakeup); |
381 | &wakeup_trace); | 320 | unregister_trace_sched_wakeup(probe_wakeup); |
382 | marker_probe_unregister("kernel_sched_wakeup_new", | ||
383 | wake_up_callback, | ||
384 | &wakeup_trace); | ||
385 | marker_probe_unregister("kernel_sched_wakeup", | ||
386 | wake_up_callback, | ||
387 | &wakeup_trace); | ||
388 | } | 321 | } |
389 | 322 | ||
390 | static void wakeup_tracer_init(struct trace_array *tr) | 323 | static void wakeup_tracer_init(struct trace_array *tr) |