author		Linus Torvalds <torvalds@linux-foundation.org>	2010-03-18 19:52:46 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2010-03-18 19:52:46 -0400
commit		f82c37e7bb4c4d9b6a476c642d5c2d2efbd6f240 (patch)
tree		09fc553c2fb6f527962048d139159dc139e04afc /include/trace
parent		c6b9e73f2fee8bb86058f296de808b326473456b (diff)
parent		dcd5c1662db59a6b82942f47fb6ac9dd63f6d3dd (diff)
Merge branch 'perf-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip
* 'perf-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip: (35 commits)
  perf: Fix unexported generic perf_arch_fetch_caller_regs
  perf record: Don't try to find buildids in a zero sized file
  perf: export perf_trace_regs and perf_arch_fetch_caller_regs
  perf, x86: Fix hw_perf_enable() event assignment
  perf, ppc: Fix compile error due to new cpu notifiers
  perf: Make the install relative to DESTDIR if specified
  kprobes: Calculate the index correctly when freeing the out-of-line execution slot
  perf tools: Fix sparse CPU numbering related bugs
  perf_event: Fix oops triggered by cpu offline/online
  perf: Drop the obsolete profile naming for trace events
  perf: Take a hot regs snapshot for trace events
  perf: Introduce new perf_fetch_caller_regs() for hot regs snapshot
  perf/x86-64: Use frame pointer to walk on irq and process stacks
  lockdep: Move lock events under lockdep recursion protection
  perf report: Print the map table just after samples for which no map was found
  perf report: Add multiple event support
  perf session: Change perf_session post processing functions to take histogram tree
  perf session: Add storage for seperating event types in report
  perf session: Change add_hist_entry to take the tree root instead of session
  perf record: Add ID and to recorded event data when recording multiple events
  ...
Diffstat (limited to 'include/trace')
-rw-r--r--	include/trace/ftrace.h	44
-rw-r--r--	include/trace/syscall.h	8
2 files changed, 28 insertions(+), 24 deletions(-)
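
The ftrace.h hunks below only rename the token-pasted glue that DEFINE_EVENT() emits for perf (ftrace_profile_* becomes perf_trace_*); the generation technique itself is unchanged. As a rough orientation, here is a minimal, self-contained user-space sketch of that ##-pasting technique, assuming hypothetical register_probe()/unregister_probe() helpers and a made-up sched_switch event; none of these names are kernel APIs.

#include <stdio.h>

typedef void (*probe_fn)(int arg);

static probe_fn registered_probe;	/* stands in for the tracepoint hook */

static int register_probe(probe_fn fn)
{
	registered_probe = fn;
	return 0;
}

static void unregister_probe(probe_fn fn)
{
	if (registered_probe == fn)
		registered_probe = NULL;
}

/* One invocation generates the per-event callback plus its enable/disable wrappers. */
#define DEFINE_EVENT(name)						\
static void perf_trace_##name(int arg)					\
{									\
	printf("perf_trace_" #name ": arg=%d\n", arg);			\
}									\
									\
static int perf_trace_enable_##name(void)				\
{									\
	return register_probe(perf_trace_##name);			\
}									\
									\
static void perf_trace_disable_##name(void)				\
{									\
	unregister_probe(perf_trace_##name);				\
}

DEFINE_EVENT(sched_switch)	/* expands into the three functions above */

int main(void)
{
	perf_trace_enable_sched_switch();
	if (registered_probe)
		registered_probe(42);	/* the "tracepoint" fires */
	perf_trace_disable_sched_switch();
	return 0;
}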
diff --git a/include/trace/ftrace.h b/include/trace/ftrace.h
index 601ad7744247..ea6f9d4a20e9 100644
--- a/include/trace/ftrace.h
+++ b/include/trace/ftrace.h
@@ -401,18 +401,18 @@ static inline notrace int ftrace_get_offsets_##call( \
 #undef DEFINE_EVENT
 #define DEFINE_EVENT(template, name, proto, args)			\
 									\
-static void ftrace_profile_##name(proto);				\
+static void perf_trace_##name(proto);					\
 									\
 static notrace int							\
-ftrace_profile_enable_##name(struct ftrace_event_call *unused)		\
+perf_trace_enable_##name(struct ftrace_event_call *unused)		\
 {									\
-	return register_trace_##name(ftrace_profile_##name);		\
+	return register_trace_##name(perf_trace_##name);		\
 }									\
 									\
 static notrace void							\
-ftrace_profile_disable_##name(struct ftrace_event_call *unused)	\
+perf_trace_disable_##name(struct ftrace_event_call *unused)		\
 {									\
-	unregister_trace_##name(ftrace_profile_##name);			\
+	unregister_trace_##name(perf_trace_##name);			\
 }
 
 #undef DEFINE_EVENT_PRINT
@@ -507,12 +507,12 @@ ftrace_profile_disable_##name(struct ftrace_event_call *unused) \
 
 #ifdef CONFIG_PERF_EVENTS
 
-#define _TRACE_PROFILE_INIT(call)					\
-	.profile_enable = ftrace_profile_enable_##call,			\
-	.profile_disable = ftrace_profile_disable_##call,
+#define _TRACE_PERF_INIT(call)						\
+	.perf_event_enable = perf_trace_enable_##call,			\
+	.perf_event_disable = perf_trace_disable_##call,
 
 #else
-#define _TRACE_PROFILE_INIT(call)
+#define _TRACE_PERF_INIT(call)
 #endif /* CONFIG_PERF_EVENTS */
 
 #undef __entry
@@ -638,7 +638,7 @@ __attribute__((section("_ftrace_events"))) event_##call = { \
 	.unregfunc = ftrace_raw_unreg_event_##call,			\
 	.print_fmt = print_fmt_##template,				\
 	.define_fields = ftrace_define_fields_##template,		\
-	_TRACE_PROFILE_INIT(call)					\
+	_TRACE_PERF_INIT(call)						\
 }
 
 #undef DEFINE_EVENT_PRINT
@@ -657,18 +657,18 @@ __attribute__((section("_ftrace_events"))) event_##call = { \
 	.unregfunc = ftrace_raw_unreg_event_##call,			\
 	.print_fmt = print_fmt_##call,					\
 	.define_fields = ftrace_define_fields_##template,		\
-	_TRACE_PROFILE_INIT(call)					\
+	_TRACE_PERF_INIT(call)						\
 }
 
 #include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
 
 /*
- * Define the insertion callback to profile events
+ * Define the insertion callback to perf events
  *
  * The job is very similar to ftrace_raw_event_<call> except that we don't
  * insert in the ring buffer but in a perf counter.
  *
- * static void ftrace_profile_<call>(proto)
+ * static void ftrace_perf_<call>(proto)
  * {
  *	struct ftrace_data_offsets_<call> __maybe_unused __data_offsets;
  *	struct ftrace_event_call *event_call = &event_<call>;
@@ -757,13 +757,14 @@ __attribute__((section("_ftrace_events"))) event_##call = { \
 #undef DECLARE_EVENT_CLASS
 #define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print)	\
 static notrace void							\
-ftrace_profile_templ_##call(struct ftrace_event_call *event_call,	\
+perf_trace_templ_##call(struct ftrace_event_call *event_call,		\
 			proto)						\
 {									\
 	struct ftrace_data_offsets_##call __maybe_unused __data_offsets;\
 	struct ftrace_raw_##call *entry;				\
 	u64 __addr = 0, __count = 1;					\
 	unsigned long irq_flags;					\
+	struct pt_regs *__regs;						\
 	int __entry_size;						\
 	int __data_size;						\
 	int rctx;							\
@@ -773,10 +774,10 @@ ftrace_profile_templ_##call(struct ftrace_event_call *event_call, \
 			     sizeof(u64));				\
 	__entry_size -= sizeof(u32);					\
 									\
-	if (WARN_ONCE(__entry_size > FTRACE_MAX_PROFILE_SIZE,		\
+	if (WARN_ONCE(__entry_size > PERF_MAX_TRACE_SIZE,		\
 		      "profile buffer not large enough"))		\
 		return;							\
-	entry = (struct ftrace_raw_##call *)ftrace_perf_buf_prepare(	\
+	entry = (struct ftrace_raw_##call *)perf_trace_buf_prepare(	\
 		__entry_size, event_call->id, &rctx, &irq_flags);	\
 	if (!entry)							\
 		return;							\
@@ -784,17 +785,20 @@ ftrace_profile_templ_##call(struct ftrace_event_call *event_call, \
 									\
 	{ assign; }							\
 									\
-	ftrace_perf_buf_submit(entry, __entry_size, rctx, __addr,	\
-			       __count, irq_flags);			\
+	__regs = &__get_cpu_var(perf_trace_regs);			\
+	perf_fetch_caller_regs(__regs, 2);				\
+									\
+	perf_trace_buf_submit(entry, __entry_size, rctx, __addr,	\
+			      __count, irq_flags, __regs);		\
 }
 
 #undef DEFINE_EVENT
 #define DEFINE_EVENT(template, call, proto, args)			\
-static notrace void ftrace_profile_##call(proto)			\
+static notrace void perf_trace_##call(proto)				\
 {									\
 	struct ftrace_event_call *event_call = &event_##call;		\
 									\
-	ftrace_profile_templ_##template(event_call, args);		\
+	perf_trace_templ_##template(event_call, args);			\
 }
 
 #undef DEFINE_EVENT_PRINT
diff --git a/include/trace/syscall.h b/include/trace/syscall.h
index 0387100752f0..e5e5f48dbfb3 100644
--- a/include/trace/syscall.h
+++ b/include/trace/syscall.h
@@ -47,10 +47,10 @@ enum print_line_t print_syscall_exit(struct trace_iterator *iter, int flags);
 #endif
 
 #ifdef CONFIG_PERF_EVENTS
-int prof_sysenter_enable(struct ftrace_event_call *call);
-void prof_sysenter_disable(struct ftrace_event_call *call);
-int prof_sysexit_enable(struct ftrace_event_call *call);
-void prof_sysexit_disable(struct ftrace_event_call *call);
+int perf_sysenter_enable(struct ftrace_event_call *call);
+void perf_sysenter_disable(struct ftrace_event_call *call);
+int perf_sysexit_enable(struct ftrace_event_call *call);
+void perf_sysexit_disable(struct ftrace_event_call *call);
 #endif
 
 #endif /* _TRACE_SYSCALL_H */
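
For context on the behavioural change in the perf_trace_templ_<call>() body above (the hot regs snapshot taken just before submission), here is a rough user-space sketch of the prepare/assign/snapshot/submit flow. buf_prepare(), fetch_caller_regs() and buf_submit() are hypothetical stand-ins for perf_trace_buf_prepare(), perf_fetch_caller_regs() and perf_trace_buf_submit(), and the entry layout is invented for illustration; this is not the kernel implementation.

#include <stdio.h>
#include <stddef.h>
#include <stdint.h>

struct regs_snapshot { uintptr_t ip, sp; };	/* stands in for struct pt_regs */

struct trace_entry {
	uint64_t ts;
	int pid;
	char comm[16];
};

static char trace_buf[256];			/* stands in for the per-cpu perf buffer */

static void *buf_prepare(size_t size, int *rctx)
{
	*rctx = 0;				/* recursion-context slot, hardcoded here */
	return size <= sizeof(trace_buf) ? (void *)trace_buf : NULL;
}

static void fetch_caller_regs(struct regs_snapshot *regs)
{
	/* crude "hot regs snapshot": record the caller's ip and frame address */
	regs->ip = (uintptr_t)__builtin_return_address(0);
	regs->sp = (uintptr_t)__builtin_frame_address(0);
}

static void buf_submit(const void *entry, size_t size, int rctx,
		       const struct regs_snapshot *regs)
{
	(void)entry;
	printf("submit %zu bytes, rctx=%d, caller ip=%#lx\n",
	       size, rctx, (unsigned long)regs->ip);
}

static void perf_trace_example(int pid, const char *comm)
{
	struct trace_entry *entry;
	struct regs_snapshot regs;
	int rctx;

	entry = buf_prepare(sizeof(*entry), &rctx);	/* size and reserve the entry */
	if (!entry)
		return;

	entry->ts = 0;					/* the { assign; } step */
	entry->pid = pid;
	snprintf(entry->comm, sizeof(entry->comm), "%s", comm);

	fetch_caller_regs(&regs);			/* snapshot regs before submitting */
	buf_submit(entry, sizeof(*entry), rctx, &regs);
}

int main(void)
{
	perf_trace_example(1234, "bash");
	return 0;
}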