author     Ingo Molnar <mingo@elte.hu>  2010-03-12 04:20:57 -0500
committer  Ingo Molnar <mingo@elte.hu>  2010-03-12 04:20:59 -0500
commit     937779db13fb6cb621e28d9ae0a6cf1d05b57d05 (patch)
tree       6c27402677c347c4dc01980de78c270630588847 /include
parent     6230f2c7ef01a69e2ba9370326572c287209d32a (diff)
parent     9f591fd76afdc0e5192e9ed00a36f8efc0b4dfe6 (diff)
Merge branch 'perf/urgent' into perf/core

Merge reason: We want to queue up a dependent patch.

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'include')
-rw-r--r--  include/linux/ftrace_event.h | 23
-rw-r--r--  include/linux/perf_event.h   | 43
-rw-r--r--  include/linux/syscalls.h     | 24
-rw-r--r--  include/trace/ftrace.h       | 44
-rw-r--r--  include/trace/syscall.h      |  8
5 files changed, 95 insertions(+), 47 deletions(-)
diff --git a/include/linux/ftrace_event.h b/include/linux/ftrace_event.h
index 6b7c444ab8f6..c0f4b364c711 100644
--- a/include/linux/ftrace_event.h
+++ b/include/linux/ftrace_event.h
@@ -131,12 +131,12 @@ struct ftrace_event_call {
 	void			*mod;
 	void			*data;
 
-	int			profile_count;
-	int			(*profile_enable)(struct ftrace_event_call *);
-	void			(*profile_disable)(struct ftrace_event_call *);
+	int			perf_refcount;
+	int			(*perf_event_enable)(struct ftrace_event_call *);
+	void			(*perf_event_disable)(struct ftrace_event_call *);
 };
 
-#define FTRACE_MAX_PROFILE_SIZE	2048
+#define PERF_MAX_TRACE_SIZE	2048
 
 #define MAX_FILTER_PRED		32
 #define MAX_FILTER_STR_VAL	256	/* Should handle KSYM_SYMBOL_LEN */
@@ -187,22 +187,25 @@ do { \
 
 #ifdef CONFIG_PERF_EVENTS
 struct perf_event;
-extern int ftrace_profile_enable(int event_id);
-extern void ftrace_profile_disable(int event_id);
+
+DECLARE_PER_CPU(struct pt_regs, perf_trace_regs);
+
+extern int perf_trace_enable(int event_id);
+extern void perf_trace_disable(int event_id);
 extern int ftrace_profile_set_filter(struct perf_event *event, int event_id,
 				     char *filter_str);
 extern void ftrace_profile_free_filter(struct perf_event *event);
 extern void *
-ftrace_perf_buf_prepare(int size, unsigned short type, int *rctxp,
-			unsigned long *irq_flags);
+perf_trace_buf_prepare(int size, unsigned short type, int *rctxp,
+		       unsigned long *irq_flags);
 
 static inline void
-ftrace_perf_buf_submit(void *raw_data, int size, int rctx, u64 addr,
-		       u64 count, unsigned long irq_flags)
+perf_trace_buf_submit(void *raw_data, int size, int rctx, u64 addr,
+		      u64 count, unsigned long irq_flags, struct pt_regs *regs)
 {
 	struct trace_entry *entry = raw_data;
 
-	perf_tp_event(entry->type, addr, count, raw_data, size);
+	perf_tp_event(entry->type, addr, count, raw_data, size, regs);
 	perf_swevent_put_recursion_context(rctx);
 	local_irq_restore(irq_flags);
 }
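
To illustrate the renamed interface, here is a minimal sketch (not part of the patch) of a hand-written handler driving perf_trace_buf_prepare()/perf_trace_buf_submit() after this change. The event name, record layout and skip depth are hypothetical; the per-CPU perf_trace_regs scratch area and perf_fetch_caller_regs() (declared in perf_event.h further down) are what supplies the new regs argument, and the generated templates additionally round the record size up to a u64 multiple before the PERF_MAX_TRACE_SIZE check.

/*
 * Sketch only: a hypothetical handler following the renamed API.
 * "my_event" and its record layout are illustrative, not from the patch.
 */
static void perf_trace_my_event(int event_id, u64 addr)
{
	struct {
		struct trace_entry	ent;
		unsigned long		value;
	} *entry;
	struct pt_regs *regs;
	unsigned long irq_flags;
	int rctx;

	/* Reserve a trace buffer slot and set up the common header */
	entry = perf_trace_buf_prepare(sizeof(*entry), event_id,
				       &rctx, &irq_flags);
	if (!entry)
		return;

	entry->value = addr;

	/* New in this merge: snapshot the caller's regs for sampling */
	regs = &__get_cpu_var(perf_trace_regs);
	perf_fetch_caller_regs(regs, 1);

	/* Forwards to perf_tp_event() and drops the recursion context */
	perf_trace_buf_submit(entry, sizeof(*entry), rctx, addr,
			      1, irq_flags, regs);
}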
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index be85f7c4a94f..2bccb7b9da2d 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -459,6 +459,8 @@ enum perf_callchain_context {
459#include <linux/fs.h> 459#include <linux/fs.h>
460#include <linux/pid_namespace.h> 460#include <linux/pid_namespace.h>
461#include <linux/workqueue.h> 461#include <linux/workqueue.h>
462#include <linux/ftrace.h>
463#include <linux/cpu.h>
462#include <asm/atomic.h> 464#include <asm/atomic.h>
463 465
464#define PERF_MAX_STACK_DEPTH 255 466#define PERF_MAX_STACK_DEPTH 255
@@ -865,6 +867,44 @@ perf_sw_event(u32 event_id, u64 nr, int nmi, struct pt_regs *regs, u64 addr)
 		__perf_sw_event(event_id, nr, nmi, regs, addr);
 }
 
+extern void
+perf_arch_fetch_caller_regs(struct pt_regs *regs, unsigned long ip, int skip);
+
+/*
+ * Take a snapshot of the regs. Skip ip and frame pointer to
+ * the nth caller. We only need a few of the regs:
+ * - ip for PERF_SAMPLE_IP
+ * - cs for user_mode() tests
+ * - bp for callchains
+ * - eflags, for future purposes, just in case
+ */
+static inline void perf_fetch_caller_regs(struct pt_regs *regs, int skip)
+{
+	unsigned long ip;
+
+	memset(regs, 0, sizeof(*regs));
+
+	switch (skip) {
+	case 1 :
+		ip = CALLER_ADDR0;
+		break;
+	case 2 :
+		ip = CALLER_ADDR1;
+		break;
+	case 3 :
+		ip = CALLER_ADDR2;
+		break;
+	case 4:
+		ip = CALLER_ADDR3;
+		break;
+	/* No need to support further for now */
+	default:
+		ip = 0;
+	}
+
+	return perf_arch_fetch_caller_regs(regs, ip, skip);
+}
+
 extern void __perf_event_mmap(struct vm_area_struct *vma);
 
 static inline void perf_event_mmap(struct vm_area_struct *vma)
@@ -898,7 +938,8 @@ static inline bool perf_paranoid_kernel(void)
 }
 
 extern void perf_event_init(void);
-extern void perf_tp_event(int event_id, u64 addr, u64 count, void *record, int entry_size);
+extern void perf_tp_event(int event_id, u64 addr, u64 count, void *record,
+			  int entry_size, struct pt_regs *regs);
 extern void perf_bp_event(struct perf_event *event, void *data);
 
 #ifndef perf_misc_flags
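
The new <linux/ftrace.h> include pulls in the CALLER_ADDR0..CALLER_ADDR3 macros that perf_fetch_caller_regs() maps the skip argument onto; skip selects how far up the call stack the recorded ip points (the trace templates in trace/ftrace.h pass skip == 2). A hedged usage sketch, with a hypothetical helper, of taking the hot regs snapshot and feeding it to the extended perf_tp_event():

/*
 * Sketch only: "emit_sample" and its parameters are hypothetical.
 * addr == 0 and count == 1 follow the defaults the templates use.
 */
static void emit_sample(int event_id, void *record, int size)
{
	struct pt_regs *regs = &__get_cpu_var(perf_trace_regs);

	/* skip == 1: attribute the sample to emit_sample()'s caller */
	perf_fetch_caller_regs(regs, 1);

	perf_tp_event(event_id, 0, 1, record, size, regs);
}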
diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h
index 8126f239edf0..51435bcc3460 100644
--- a/include/linux/syscalls.h
+++ b/include/linux/syscalls.h
@@ -101,18 +101,18 @@ struct perf_event_attr;
 
 #ifdef CONFIG_PERF_EVENTS
 
-#define TRACE_SYS_ENTER_PROFILE_INIT(sname)			       \
-	.profile_enable = prof_sysenter_enable,			       \
-	.profile_disable = prof_sysenter_disable,
+#define TRACE_SYS_ENTER_PERF_INIT(sname)			       \
+	.perf_event_enable = perf_sysenter_enable,		       \
+	.perf_event_disable = perf_sysenter_disable,
 
-#define TRACE_SYS_EXIT_PROFILE_INIT(sname)			       \
-	.profile_enable = prof_sysexit_enable,			       \
-	.profile_disable = prof_sysexit_disable,
+#define TRACE_SYS_EXIT_PERF_INIT(sname)				       \
+	.perf_event_enable = perf_sysexit_enable,		       \
+	.perf_event_disable = perf_sysexit_disable,
 #else
-#define TRACE_SYS_ENTER_PROFILE(sname)
-#define TRACE_SYS_ENTER_PROFILE_INIT(sname)
-#define TRACE_SYS_EXIT_PROFILE(sname)
-#define TRACE_SYS_EXIT_PROFILE_INIT(sname)
+#define TRACE_SYS_ENTER_PERF(sname)
+#define TRACE_SYS_ENTER_PERF_INIT(sname)
+#define TRACE_SYS_EXIT_PERF(sname)
+#define TRACE_SYS_EXIT_PERF_INIT(sname)
 #endif /* CONFIG_PERF_EVENTS */
 
 #ifdef CONFIG_FTRACE_SYSCALLS
@@ -149,7 +149,7 @@ struct perf_event_attr;
 		.regfunc		= reg_event_syscall_enter,	\
 		.unregfunc		= unreg_event_syscall_enter,	\
 		.data			= (void *)&__syscall_meta_##sname,\
-		TRACE_SYS_ENTER_PROFILE_INIT(sname)			\
+		TRACE_SYS_ENTER_PERF_INIT(sname)			\
 	}
 
 #define SYSCALL_TRACE_EXIT_EVENT(sname)					\
@@ -171,7 +171,7 @@ struct perf_event_attr;
 		.regfunc		= reg_event_syscall_exit,	\
 		.unregfunc		= unreg_event_syscall_exit,	\
 		.data			= (void *)&__syscall_meta_##sname,\
-		TRACE_SYS_EXIT_PROFILE_INIT(sname)			\
+		TRACE_SYS_EXIT_PERF_INIT(sname)				\
 	}
 
 #define SYSCALL_METADATA(sname, nb)					\
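
For reference, under CONFIG_PERF_EVENTS the enter-event definition above now wires the perf callbacks in roughly as follows for a hypothetical syscall "foo" (a partial expansion sketch; fields outside this hunk are elided and only indicative):

/* Sketch: partial expansion of SYSCALL_TRACE_ENTER_EVENT(foo). */
static struct ftrace_event_call event_enter_foo = {
	/* ... name/system/raw_init fields elided ... */
	.regfunc		= reg_event_syscall_enter,
	.unregfunc		= unreg_event_syscall_enter,
	.data			= (void *)&__syscall_meta_foo,
	.perf_event_enable	= perf_sysenter_enable,		/* was .profile_enable  */
	.perf_event_disable	= perf_sysenter_disable,	/* was .profile_disable */
};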
diff --git a/include/trace/ftrace.h b/include/trace/ftrace.h
index 0804cd594803..25ab56f75d65 100644
--- a/include/trace/ftrace.h
+++ b/include/trace/ftrace.h
@@ -401,18 +401,18 @@ static inline notrace int ftrace_get_offsets_##call( \
 #undef DEFINE_EVENT
 #define DEFINE_EVENT(template, name, proto, args)			\
 									\
-static void ftrace_profile_##name(proto);				\
+static void perf_trace_##name(proto);					\
 									\
 static notrace int							\
-ftrace_profile_enable_##name(struct ftrace_event_call *unused)		\
+perf_trace_enable_##name(struct ftrace_event_call *unused)		\
 {									\
-	return register_trace_##name(ftrace_profile_##name);		\
+	return register_trace_##name(perf_trace_##name);		\
 }									\
 									\
 static notrace void							\
-ftrace_profile_disable_##name(struct ftrace_event_call *unused)		\
+perf_trace_disable_##name(struct ftrace_event_call *unused)		\
 {									\
-	unregister_trace_##name(ftrace_profile_##name);			\
+	unregister_trace_##name(perf_trace_##name);			\
 }
 
 #undef DEFINE_EVENT_PRINT
@@ -507,12 +507,12 @@ ftrace_profile_disable_##name(struct ftrace_event_call *unused) \
 
 #ifdef CONFIG_PERF_EVENTS
 
-#define _TRACE_PROFILE_INIT(call)					\
-	.profile_enable = ftrace_profile_enable_##call,			\
-	.profile_disable = ftrace_profile_disable_##call,
+#define _TRACE_PERF_INIT(call)						\
+	.perf_event_enable = perf_trace_enable_##call,			\
+	.perf_event_disable = perf_trace_disable_##call,
 
 #else
-#define _TRACE_PROFILE_INIT(call)
+#define _TRACE_PERF_INIT(call)
 #endif /* CONFIG_PERF_EVENTS */
 
 #undef __entry
@@ -638,7 +638,7 @@ __attribute__((section("_ftrace_events"))) event_##call = { \
 	.unregfunc		= ftrace_raw_unreg_event_##call,	\
 	.print_fmt		= print_fmt_##template,			\
 	.define_fields		= ftrace_define_fields_##template,	\
-	_TRACE_PROFILE_INIT(call)					\
+	_TRACE_PERF_INIT(call)						\
 }
 
 #undef DEFINE_EVENT_PRINT
@@ -657,18 +657,18 @@ __attribute__((section("_ftrace_events"))) event_##call = { \
 	.unregfunc		= ftrace_raw_unreg_event_##call,	\
 	.print_fmt		= print_fmt_##call,			\
 	.define_fields		= ftrace_define_fields_##template,	\
-	_TRACE_PROFILE_INIT(call)					\
+	_TRACE_PERF_INIT(call)						\
 }
 
 #include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
 
 /*
- * Define the insertion callback to profile events
+ * Define the insertion callback to perf events
  *
  * The job is very similar to ftrace_raw_event_<call> except that we don't
  * insert in the ring buffer but in a perf counter.
  *
- * static void ftrace_profile_<call>(proto)
+ * static void ftrace_perf_<call>(proto)
  * {
  *	struct ftrace_data_offsets_<call> __maybe_unused __data_offsets;
  *	struct ftrace_event_call *event_call = &event_<call>;
@@ -757,13 +757,14 @@ __attribute__((section("_ftrace_events"))) event_##call = { \
 #undef DECLARE_EVENT_CLASS
 #define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print)	\
 static notrace void							\
-ftrace_profile_templ_##call(struct ftrace_event_call *event_call,	\
+perf_trace_templ_##call(struct ftrace_event_call *event_call,		\
 			    proto)					\
 {									\
 	struct ftrace_data_offsets_##call __maybe_unused __data_offsets;\
 	struct ftrace_raw_##call *entry;				\
 	u64 __addr = 0, __count = 1;					\
 	unsigned long irq_flags;					\
+	struct pt_regs *__regs;						\
 	int __entry_size;						\
 	int __data_size;						\
 	int rctx;							\
@@ -773,10 +774,10 @@ ftrace_profile_templ_##call(struct ftrace_event_call *event_call, \
 			     sizeof(u64));				\
 	__entry_size -= sizeof(u32);					\
 									\
-	if (WARN_ONCE(__entry_size > FTRACE_MAX_PROFILE_SIZE,		\
+	if (WARN_ONCE(__entry_size > PERF_MAX_TRACE_SIZE,		\
 		      "profile buffer not large enough"))		\
 		return;							\
-	entry = (struct ftrace_raw_##call *)ftrace_perf_buf_prepare(	\
+	entry = (struct ftrace_raw_##call *)perf_trace_buf_prepare(	\
 		__entry_size, event_call->id, &rctx, &irq_flags);	\
 	if (!entry)							\
 		return;							\
@@ -784,17 +785,20 @@ ftrace_profile_templ_##call(struct ftrace_event_call *event_call, \
 									\
 	{ assign; }							\
 									\
-	ftrace_perf_buf_submit(entry, __entry_size, rctx, __addr,	\
-			       __count, irq_flags);			\
+	__regs = &__get_cpu_var(perf_trace_regs);			\
+	perf_fetch_caller_regs(__regs, 2);				\
+									\
+	perf_trace_buf_submit(entry, __entry_size, rctx, __addr,	\
+			      __count, irq_flags, __regs);		\
 }
 
 #undef DEFINE_EVENT
 #define DEFINE_EVENT(template, call, proto, args)		\
-static notrace void ftrace_profile_##call(proto)		\
+static notrace void perf_trace_##call(proto)			\
 {								\
 	struct ftrace_event_call *event_call = &event_##call;	\
 								\
-	ftrace_profile_templ_##template(event_call, args);	\
+	perf_trace_templ_##template(event_call, args);		\
 }
 
 #undef DEFINE_EVENT_PRINT
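
Putting the trace/ftrace.h changes together: per event class, the template above now reserves a trace buffer, runs the assign block, takes a hot regs snapshot on the local CPU and submits everything to perf. A sketch of what that roughly expands to for a hypothetical event class "foo" carrying a single unsigned long field (names, and the trailing DEFINE_EVENT wrapper, are illustrative only):

/*
 * Sketch only: approximate expansion of the DECLARE_EVENT_CLASS /
 * DEFINE_EVENT templates above for a hypothetical class "foo".
 */
static notrace void perf_trace_templ_foo(struct ftrace_event_call *event_call,
					 unsigned long val)
{
	struct ftrace_raw_foo {
		struct trace_entry	ent;
		unsigned long		val;
	} *entry;
	struct pt_regs *__regs;
	u64 __addr = 0, __count = 1;
	unsigned long irq_flags;
	int __entry_size;
	int rctx;

	/* Round the record up to a u64 boundary, minus the u32 size header */
	__entry_size = ALIGN(sizeof(*entry) + sizeof(u32), sizeof(u64));
	__entry_size -= sizeof(u32);

	if (WARN_ONCE(__entry_size > PERF_MAX_TRACE_SIZE,
		      "profile buffer not large enough"))
		return;

	entry = perf_trace_buf_prepare(__entry_size, event_call->id,
				       &rctx, &irq_flags);
	if (!entry)
		return;

	entry->val = val;			/* the per-event "assign" block */

	/* New in this merge: hand a hot regs snapshot down to perf */
	__regs = &__get_cpu_var(perf_trace_regs);
	perf_fetch_caller_regs(__regs, 2);

	perf_trace_buf_submit(entry, __entry_size, rctx, __addr,
			      __count, irq_flags, __regs);
}

/* event_foo would be emitted by the event definition macros elsewhere */
extern struct ftrace_event_call event_foo;

static notrace void perf_trace_foo(unsigned long val)
{
	struct ftrace_event_call *event_call = &event_foo;

	perf_trace_templ_foo(event_call, val);
}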
diff --git a/include/trace/syscall.h b/include/trace/syscall.h
index 0387100752f0..e5e5f48dbfb3 100644
--- a/include/trace/syscall.h
+++ b/include/trace/syscall.h
@@ -47,10 +47,10 @@ enum print_line_t print_syscall_exit(struct trace_iterator *iter, int flags);
 #endif
 
 #ifdef CONFIG_PERF_EVENTS
-int prof_sysenter_enable(struct ftrace_event_call *call);
-void prof_sysenter_disable(struct ftrace_event_call *call);
-int prof_sysexit_enable(struct ftrace_event_call *call);
-void prof_sysexit_disable(struct ftrace_event_call *call);
+int perf_sysenter_enable(struct ftrace_event_call *call);
+void perf_sysenter_disable(struct ftrace_event_call *call);
+int perf_sysexit_enable(struct ftrace_event_call *call);
+void perf_sysexit_disable(struct ftrace_event_call *call);
 #endif
 
 #endif /* _TRACE_SYSCALL_H */