aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorOleg Nesterov <oleg@redhat.com>2013-08-06 12:08:41 -0400
committerSteven Rostedt <rostedt@goodmis.org>2013-08-13 21:05:12 -0400
commit36009d07b79d2a168d6037947357d96e5d8cebe7 (patch)
tree70e960fc660e47cfaf7546cbd7cd401a4842846f
parentd4e4ab86bcba5a72779c43dc1459f71fea3d89c8 (diff)
tracing/perf: Expand TRACE_EVENT(sched_stat_runtime)
To simplify the review of the next patches: 1. We are going to reimplement __perf_task/counter and embed them into TP_ARGS(). Expand TRACE_EVENT(sched_stat_runtime) into DECLARE_EVENT_CLASS() + DEFINE_EVENT(), this way they can use different TP_ARGS's. 2. Change perf_trace_##call() macro to do perf_fetch_caller_regs() right before perf_trace_buf_prepare(). This way it evaluates TP_ARGS() asap, the next patch explores this fact. Note: after 87f44bbc perf_trace_buf_prepare() doesn't need "struct pt_regs *regs", perhaps it makes sense to remove this argument. And perhaps we can teach perf_trace_buf_submit() to accept regs == NULL and do fetch_caller_regs(CALLER_ADDR1) in this case. 3. Cosmetic, but the typecast from "void*" buys nothing. It just adds the noise, remove it. Link: http://lkml.kernel.org/r/20130806160841.GA2736@redhat.com Acked-by: Peter Zijlstra <peterz@infradead.org> Tested-by: David Ahern <dsahern@gmail.com> Signed-off-by: Oleg Nesterov <oleg@redhat.com> Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
-rw-r--r--include/trace/events/sched.h6
-rw-r--r--include/trace/ftrace.h7
2 files changed, 8 insertions, 5 deletions
diff --git a/include/trace/events/sched.h b/include/trace/events/sched.h
index e5586caff67a..249c024e67ae 100644
--- a/include/trace/events/sched.h
+++ b/include/trace/events/sched.h
@@ -372,7 +372,7 @@ DEFINE_EVENT(sched_stat_template, sched_stat_blocked,
372 * Tracepoint for accounting runtime (time the task is executing 372 * Tracepoint for accounting runtime (time the task is executing
373 * on a CPU). 373 * on a CPU).
374 */ 374 */
375TRACE_EVENT(sched_stat_runtime, 375DECLARE_EVENT_CLASS(sched_stat_runtime,
376 376
377 TP_PROTO(struct task_struct *tsk, u64 runtime, u64 vruntime), 377 TP_PROTO(struct task_struct *tsk, u64 runtime, u64 vruntime),
378 378
@@ -401,6 +401,10 @@ TRACE_EVENT(sched_stat_runtime,
401 (unsigned long long)__entry->vruntime) 401 (unsigned long long)__entry->vruntime)
402); 402);
403 403
404DEFINE_EVENT(sched_stat_runtime, sched_stat_runtime,
405 TP_PROTO(struct task_struct *tsk, u64 runtime, u64 vruntime),
406 TP_ARGS(tsk, runtime, vruntime));
407
404/* 408/*
405 * Tracepoint for showing priority inheritance modifying a tasks 409 * Tracepoint for showing priority inheritance modifying a tasks
406 * priority. 410 * priority.
diff --git a/include/trace/ftrace.h b/include/trace/ftrace.h
index 41a6643e2136..618af05f0be6 100644
--- a/include/trace/ftrace.h
+++ b/include/trace/ftrace.h
@@ -663,15 +663,14 @@ perf_trace_##call(void *__data, proto) \
663 int __data_size; \ 663 int __data_size; \
664 int rctx; \ 664 int rctx; \
665 \ 665 \
666 perf_fetch_caller_regs(&__regs); \
667 \
668 __data_size = ftrace_get_offsets_##call(&__data_offsets, args); \ 666 __data_size = ftrace_get_offsets_##call(&__data_offsets, args); \
669 __entry_size = ALIGN(__data_size + sizeof(*entry) + sizeof(u32),\ 667 __entry_size = ALIGN(__data_size + sizeof(*entry) + sizeof(u32),\
670 sizeof(u64)); \ 668 sizeof(u64)); \
671 __entry_size -= sizeof(u32); \ 669 __entry_size -= sizeof(u32); \
672 \ 670 \
673 entry = (struct ftrace_raw_##call *)perf_trace_buf_prepare( \ 671 perf_fetch_caller_regs(&__regs); \
674 __entry_size, event_call->event.type, &__regs, &rctx); \ 672 entry = perf_trace_buf_prepare(__entry_size, \
673 event_call->event.type, &__regs, &rctx); \
675 if (!entry) \ 674 if (!entry) \
676 return; \ 675 return; \
677 \ 676 \