author	Oleg Nesterov <oleg@redhat.com>	2013-08-06 12:08:47 -0400
committer	Steven Rostedt <rostedt@goodmis.org>	2013-08-13 21:06:30 -0400
commit	d027e6a9c83440bf1ca9e5503539d58d8e0914f1 (patch)
tree	8f7397e15dd463c878939fa9d063adf167580573 /include/trace
parent	12473965c38a527a0c6f7a38d23edce60957f873 (diff)
tracing/perf: Avoid perf_trace_buf_*() in perf_trace_##call() when possible
perf_trace_buf_prepare() + perf_trace_buf_submit(task => NULL) make no
sense if hlist_empty(head). Change perf_trace_##call() to check
->perf_events beforehand and do nothing if it is empty.

This removes the overhead for tasks without events associated with them.
For example, "perf record -e sched:sched_switch -p1" attaches the
counter(s) to a single task, but every task in the system will do
perf_trace_buf_prepare/submit() just to realize that it was not attached
to this event.

However, we can only do this if __task == NULL, so we also add the
__builtin_constant_p(__task) check.

With this patch "perf bench sched pipe" shows approximately 4%
improvement when "perf record -p1" runs in parallel; many thanks to
Steven for the testing.

Link: http://lkml.kernel.org/r/20130806160847.GA2746@redhat.com

Tested-by: David Ahern <dsahern@gmail.com>
Acked-by: Peter Zijlstra <peterz@infradead.org>
Signed-off-by: Oleg Nesterov <oleg@redhat.com>
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
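
As a rough illustration of the pattern (a minimal standalone sketch, not the kernel header itself): PERF_TRACE_EXAMPLE below stands in for the generated perf_trace_##call() handler, and hlist_head/hlist_empty are simplified stand-ins for the kernel's list types. Because __task is a macro argument that expands to the literal NULL for most tracepoints, __builtin_constant_p(!__task) is evaluated at compile time, so the cheap hlist_empty() bail-out is kept only where it is safe and folds away entirely for tracepoints that do pass a task.

	#include <stddef.h>
	#include <stdio.h>

	/* simplified stand-ins for the kernel's hlist types */
	struct hlist_head { void *first; };

	static inline int hlist_empty(const struct hlist_head *h)
	{
		return h->first == NULL;
	}

	/*
	 * Mirrors the shape of the early return added to perf_trace_##call():
	 * when __task is the compile-time constant NULL, the hlist_empty()
	 * check is emitted; when a real task pointer is passed, the whole
	 * condition folds to 0 and the event is always processed.
	 */
	#define PERF_TRACE_EXAMPLE(head, __task)				\
	do {									\
		if (__builtin_constant_p(!(__task)) && !(__task) &&		\
		    hlist_empty(head)) {					\
			puts("no events on this CPU, skip prepare/submit");	\
			break;							\
		}								\
		puts("would call perf_trace_buf_prepare()/submit() here");	\
	} while (0)

	int main(void)
	{
		struct hlist_head empty = { .first = NULL };
		struct hlist_head busy  = { .first = &empty };

		PERF_TRACE_EXAMPLE(&empty, NULL);	/* takes the early-return path */
		PERF_TRACE_EXAMPLE(&busy, NULL);	/* events attached: proceeds */
		return 0;
	}
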
Diffstat (limited to 'include/trace')
-rw-r--r--	include/trace/ftrace.h	7
1 file changed, 6 insertions(+), 1 deletion(-)
diff --git a/include/trace/ftrace.h b/include/trace/ftrace.h
index 4163d93ccf38..5c7ab17cbb02 100644
--- a/include/trace/ftrace.h
+++ b/include/trace/ftrace.h
@@ -667,6 +667,12 @@ perf_trace_##call(void *__data, proto) \
 	int rctx;						\
 								\
 	__data_size = ftrace_get_offsets_##call(&__data_offsets, args); \
+								\
+	head = this_cpu_ptr(event_call->perf_events);		\
+	if (__builtin_constant_p(!__task) && !__task &&		\
+			hlist_empty(head))			\
+		return;						\
+								\
 	__entry_size = ALIGN(__data_size + sizeof(*entry) + sizeof(u32),\
 			     sizeof(u64));			\
 	__entry_size -= sizeof(u32);				\
@@ -681,7 +687,6 @@ perf_trace_##call(void *__data, proto) \
 								\
 	{ assign; }						\
 								\
-	head = this_cpu_ptr(event_call->perf_events);		\
 	perf_trace_buf_submit(entry, __entry_size, rctx, __addr, \
 				__count, &__regs, head, __task); \
 }