author		Steven Rostedt <srostedt@redhat.com>	2012-08-09 22:42:57 -0400
committer	Steven Rostedt <rostedt@goodmis.org>	2014-03-07 10:06:07 -0500
commit		3fd40d1ee6a317523172ab95b6f7ea41ba8fcee3
tree		b6703da042d877c047136cd4fc3263b1ffccd20d /include/trace
parent		35bb4399bd0ef16b8a57fccea0047d98b6b0e7fb
tracing: Use helper functions in event assignment to shrink macro size
The functions that assign the contents for the ftrace events are
defined by the TRACE_EVENT() macros. Each event has its own unique
way to assign data to its buffer. When you have over 500 events,
that means there are 500 functions assigning data uniquely for each
event (not really that many, as DECLARE_EVENT_CLASS() and multiple
DEFINE_EVENT()s will only need a single function).

By making helper functions in the core kernel to do some of the work
instead, we can shrink the size of the kernel down a bit.

With a kernel configured with 502 events, the change in size was:

    text	   data	    bss	     dec	    hex	filename
12987390	1913504	9785344	24686238	178ae9e	/tmp/vmlinux
12959102	1913504	9785344	24657950	178401e	/tmp/vmlinux.patched

That's a total of 28288 bytes, which comes down to 56 bytes per event.

Link: http://lkml.kernel.org/r/20120810034708.370808175@goodmis.org

Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
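For context, each TRACE_EVENT() (or DECLARE_EVENT_CLASS()) expansion
generates its own ftrace_raw_event_##call() function containing the
reserve/assign/commit boilerplate patched below. A minimal, hypothetical
event definition shows what gets duplicated per class (the name
sample_event and its field are invented for illustration):

	/* Hypothetical event: the TP_fast_assign() body below is expanded
	 * by include/trace/ftrace.h into a dedicated
	 * ftrace_raw_event_sample_event() function, so before this patch
	 * every such event class carried its own copy of the ring-buffer
	 * reserve/commit plumbing. */
	TRACE_EVENT(sample_event,

		TP_PROTO(int value),

		TP_ARGS(value),

		TP_STRUCT__entry(
			__field(int, value)
		),

		TP_fast_assign(
			__entry->value = value;
		),

		TP_printk("value=%d", __entry->value)
	);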
Diffstat (limited to 'include/trace')
 include/trace/ftrace.h | 22 ++++++----------------
 1 file changed, 6 insertions(+), 16 deletions(-)
diff --git a/include/trace/ftrace.h b/include/trace/ftrace.h
index 54928faf2119..1cc2265caa52 100644
--- a/include/trace/ftrace.h
+++ b/include/trace/ftrace.h
@@ -532,37 +532,27 @@ static notrace void \
 ftrace_raw_event_##call(void *__data, proto)				\
 {									\
 	struct ftrace_event_file *ftrace_file = __data;			\
-	struct ftrace_event_call *event_call = ftrace_file->event_call; \
 	struct ftrace_data_offsets_##call __maybe_unused __data_offsets;\
-	struct ring_buffer_event *event;				\
+	struct ftrace_event_buffer fbuffer;				\
 	struct ftrace_raw_##call *entry;				\
-	struct ring_buffer *buffer;					\
-	unsigned long irq_flags;					\
 	int __data_size;						\
-	int pc;								\
 									\
 	if (ftrace_trigger_soft_disabled(ftrace_file))			\
 		return;							\
 									\
-	local_save_flags(irq_flags);					\
-	pc = preempt_count();						\
-									\
 	__data_size = ftrace_get_offsets_##call(&__data_offsets, args); \
 									\
-	event = trace_event_buffer_lock_reserve(&buffer, ftrace_file,	\
-				 event_call->event.type,		\
-				 sizeof(*entry) + __data_size,		\
-				 irq_flags, pc);			\
-	if (!event)							\
+	entry = ftrace_event_buffer_reserve(&fbuffer, ftrace_file,	\
+				 sizeof(*entry) + __data_size);		\
+									\
+	if (!entry)							\
 		return;							\
-	entry = ring_buffer_event_data(event);				\
 									\
 	tstruct								\
 									\
 	{ assign; }							\
 									\
-	event_trigger_unlock_commit(ftrace_file, buffer, event, entry, \
-				  irq_flags, pc);			\
+	ftrace_event_buffer_commit(&fbuffer);				\
 }
 /*
  * The ftrace_test_probe is compiled out, it is only here as a build time check
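The boilerplate removed above does not disappear; it moves into
out-of-line helpers in the tracing core. The following is a sketch of
those helpers reconstructed from the call sites and the deleted macro
lines; the authoritative definitions live in kernel/trace/trace_events.c
and may differ in detail:

	/* Sketch: state that used to live in per-event stack variables. */
	struct ftrace_event_buffer {
		struct ring_buffer		*buffer;
		struct ring_buffer_event	*event;
		struct ftrace_event_file	*ftrace_file;
		void				*entry;
		unsigned long			flags;
		int				pc;
	};

	/* Reserve space in the ring buffer and return the entry to fill,
	 * or NULL if the event could not be reserved. */
	void *ftrace_event_buffer_reserve(struct ftrace_event_buffer *fbuffer,
					  struct ftrace_event_file *ftrace_file,
					  unsigned long len)
	{
		struct ftrace_event_call *event_call = ftrace_file->event_call;

		local_save_flags(fbuffer->flags);
		fbuffer->pc = preempt_count();
		fbuffer->ftrace_file = ftrace_file;

		fbuffer->event =
			trace_event_buffer_lock_reserve(&fbuffer->buffer, ftrace_file,
							event_call->event.type, len,
							fbuffer->flags, fbuffer->pc);
		if (!fbuffer->event)
			return NULL;

		fbuffer->entry = ring_buffer_event_data(fbuffer->event);
		return fbuffer->entry;
	}

	/* Commit the filled entry, honoring any triggers set on the event. */
	void ftrace_event_buffer_commit(struct ftrace_event_buffer *fbuffer)
	{
		event_trigger_unlock_commit(fbuffer->ftrace_file, fbuffer->buffer,
					    fbuffer->event, fbuffer->entry,
					    fbuffer->flags, fbuffer->pc);
	}

With the flag/pc bookkeeping and ring-buffer calls emitted once instead
of once per event class, each expanded ftrace_raw_event_##call() keeps
only the assignment of its own fields, which is where the roughly 56
bytes saved per event come from.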