diff options
author | Linus Torvalds <torvalds@linux-foundation.org> | 2013-04-29 16:55:38 -0400 |
---|---|---|
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2013-04-29 16:55:38 -0400 |
commit | 9e8529afc4518f4e5d610001545ebc97e1333c79 (patch) | |
tree | 26e1aa2cbb50f3f511cfa7d8e39e6b7bd9221b68 /kernel/trace/trace.h | |
parent | ec25e246b94a3233ab064994ef05a170bdba0e7c (diff) | |
parent | 4c69e6ea415a35eb7f0fc8ee9390c8f7436492a2 (diff) |
Merge tag 'trace-3.10' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace
Pull tracing updates from Steven Rostedt:
"Along with the usual minor fixes and clean ups there are a few major
changes with this pull request.
1) Multiple buffers for the ftrace facility
This feature has been requested by many people over the last few
years. I even heard that Google was about to implement it themselves.
I finally had time and cleaned up the code such that you can now
create multiple instances of the ftrace buffer and have different
events go to different buffers. This way, a low frequency event will
not be lost in the noise of a high frequency event.
Note, currently only events can go to different buffers, the tracers
(i.e. function, function_graph and the latency tracers) still can only
be written to the main buffer.
2) The function tracer triggers have now been extended.
The function tracer had two triggers. One to enable tracing when a
function is hit, and one to disable tracing. Now you can record a
stack trace on a single (or many) function(s), take a snapshot of the
buffer (copy it to the snapshot buffer), and you can enable or disable
an event to be traced when a function is hit.
3) A perf clock has been added.
A "perf" clock can be chosen to be used when tracing. This will cause
ftrace to use the same clock as perf uses, and hopefully this will
make it easier to interleave the perf and ftrace data for analysis."
* tag 'trace-3.10' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace: (82 commits)
tracepoints: Prevent null probe from being added
tracing: Compare to 1 instead of zero for is_signed_type()
tracing: Remove obsolete macro guard _TRACE_PROFILE_INIT
ftrace: Get rid of ftrace_profile_bits
tracing: Check return value of tracing_init_dentry()
tracing: Get rid of unneeded key calculation in ftrace_hash_move()
tracing: Reset ftrace_graph_filter_enabled if count is zero
tracing: Fix off-by-one on allocating stat->pages
kernel: tracing: Use strlcpy instead of strncpy
tracing: Update debugfs README file
tracing: Fix ftrace_dump()
tracing: Rename trace_event_mutex to trace_event_sem
tracing: Fix comment about prefix in arch_syscall_match_sym_name()
tracing: Convert trace_destroy_fields() to static
tracing: Move find_event_field() into trace_events.c
tracing: Use TRACE_MAX_PRINT instead of constant
tracing: Use pr_warn_once instead of open coded implementation
ring-buffer: Add ring buffer startup selftest
tracing: Bring Documentation/trace/ftrace.txt up to date
tracing: Add "perf" trace_clock
...
Conflicts:
kernel/trace/ftrace.c
kernel/trace/trace.c
Diffstat (limited to 'kernel/trace/trace.h')
-rw-r--r-- | kernel/trace/trace.h | 144 |
1 file changed, 122 insertions, 22 deletions
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h index 2081971367ea..9e014582e763 100644 --- a/kernel/trace/trace.h +++ b/kernel/trace/trace.h | |||
@@ -13,6 +13,11 @@ | |||
13 | #include <linux/trace_seq.h> | 13 | #include <linux/trace_seq.h> |
14 | #include <linux/ftrace_event.h> | 14 | #include <linux/ftrace_event.h> |
15 | 15 | ||
16 | #ifdef CONFIG_FTRACE_SYSCALLS | ||
17 | #include <asm/unistd.h> /* For NR_SYSCALLS */ | ||
18 | #include <asm/syscall.h> /* some archs define it here */ | ||
19 | #endif | ||
20 | |||
16 | enum trace_type { | 21 | enum trace_type { |
17 | __TRACE_FIRST_TYPE = 0, | 22 | __TRACE_FIRST_TYPE = 0, |
18 | 23 | ||
@@ -29,6 +34,7 @@ enum trace_type { | |||
29 | TRACE_GRAPH_ENT, | 34 | TRACE_GRAPH_ENT, |
30 | TRACE_USER_STACK, | 35 | TRACE_USER_STACK, |
31 | TRACE_BLK, | 36 | TRACE_BLK, |
37 | TRACE_BPUTS, | ||
32 | 38 | ||
33 | __TRACE_LAST_TYPE, | 39 | __TRACE_LAST_TYPE, |
34 | }; | 40 | }; |
@@ -127,12 +133,21 @@ enum trace_flag_type { | |||
127 | 133 | ||
128 | #define TRACE_BUF_SIZE 1024 | 134 | #define TRACE_BUF_SIZE 1024 |
129 | 135 | ||
136 | struct trace_array; | ||
137 | |||
138 | struct trace_cpu { | ||
139 | struct trace_array *tr; | ||
140 | struct dentry *dir; | ||
141 | int cpu; | ||
142 | }; | ||
143 | |||
130 | /* | 144 | /* |
131 | * The CPU trace array - it consists of thousands of trace entries | 145 | * The CPU trace array - it consists of thousands of trace entries |
132 | * plus some other descriptor data: (for example which task started | 146 | * plus some other descriptor data: (for example which task started |
133 | * the trace, etc.) | 147 | * the trace, etc.) |
134 | */ | 148 | */ |
135 | struct trace_array_cpu { | 149 | struct trace_array_cpu { |
150 | struct trace_cpu trace_cpu; | ||
136 | atomic_t disabled; | 151 | atomic_t disabled; |
137 | void *buffer_page; /* ring buffer spare */ | 152 | void *buffer_page; /* ring buffer spare */ |
138 | 153 | ||
@@ -151,20 +166,83 @@ struct trace_array_cpu { | |||
151 | char comm[TASK_COMM_LEN]; | 166 | char comm[TASK_COMM_LEN]; |
152 | }; | 167 | }; |
153 | 168 | ||
169 | struct tracer; | ||
170 | |||
171 | struct trace_buffer { | ||
172 | struct trace_array *tr; | ||
173 | struct ring_buffer *buffer; | ||
174 | struct trace_array_cpu __percpu *data; | ||
175 | cycle_t time_start; | ||
176 | int cpu; | ||
177 | }; | ||
178 | |||
154 | /* | 179 | /* |
155 | * The trace array - an array of per-CPU trace arrays. This is the | 180 | * The trace array - an array of per-CPU trace arrays. This is the |
156 | * highest level data structure that individual tracers deal with. | 181 | * highest level data structure that individual tracers deal with. |
157 | * They have on/off state as well: | 182 | * They have on/off state as well: |
158 | */ | 183 | */ |
159 | struct trace_array { | 184 | struct trace_array { |
160 | struct ring_buffer *buffer; | 185 | struct list_head list; |
161 | int cpu; | 186 | char *name; |
187 | struct trace_buffer trace_buffer; | ||
188 | #ifdef CONFIG_TRACER_MAX_TRACE | ||
189 | /* | ||
190 | * The max_buffer is used to snapshot the trace when a maximum | ||
191 | * latency is reached, or when the user initiates a snapshot. | ||
192 | * Some tracers will use this to store a maximum trace while | ||
193 | * it continues examining live traces. | ||
194 | * | ||
195 | * The buffers for the max_buffer are set up the same as the trace_buffer | ||
196 | * When a snapshot is taken, the buffer of the max_buffer is swapped | ||
197 | * with the buffer of the trace_buffer and the buffers are reset for | ||
198 | * the trace_buffer so the tracing can continue. | ||
199 | */ | ||
200 | struct trace_buffer max_buffer; | ||
201 | bool allocated_snapshot; | ||
202 | #endif | ||
162 | int buffer_disabled; | 203 | int buffer_disabled; |
163 | cycle_t time_start; | 204 | struct trace_cpu trace_cpu; /* place holder */ |
205 | #ifdef CONFIG_FTRACE_SYSCALLS | ||
206 | int sys_refcount_enter; | ||
207 | int sys_refcount_exit; | ||
208 | DECLARE_BITMAP(enabled_enter_syscalls, NR_syscalls); | ||
209 | DECLARE_BITMAP(enabled_exit_syscalls, NR_syscalls); | ||
210 | #endif | ||
211 | int stop_count; | ||
212 | int clock_id; | ||
213 | struct tracer *current_trace; | ||
214 | unsigned int flags; | ||
215 | raw_spinlock_t start_lock; | ||
216 | struct dentry *dir; | ||
217 | struct dentry *options; | ||
218 | struct dentry *percpu_dir; | ||
219 | struct dentry *event_dir; | ||
220 | struct list_head systems; | ||
221 | struct list_head events; | ||
164 | struct task_struct *waiter; | 222 | struct task_struct *waiter; |
165 | struct trace_array_cpu *data[NR_CPUS]; | 223 | int ref; |
166 | }; | 224 | }; |
167 | 225 | ||
226 | enum { | ||
227 | TRACE_ARRAY_FL_GLOBAL = (1 << 0) | ||
228 | }; | ||
229 | |||
230 | extern struct list_head ftrace_trace_arrays; | ||
231 | |||
232 | /* | ||
233 | * The global tracer (top) should be the first trace array added, | ||
234 | * but we check the flag anyway. | ||
235 | */ | ||
236 | static inline struct trace_array *top_trace_array(void) | ||
237 | { | ||
238 | struct trace_array *tr; | ||
239 | |||
240 | tr = list_entry(ftrace_trace_arrays.prev, | ||
241 | typeof(*tr), list); | ||
242 | WARN_ON(!(tr->flags & TRACE_ARRAY_FL_GLOBAL)); | ||
243 | return tr; | ||
244 | } | ||
245 | |||
168 | #define FTRACE_CMP_TYPE(var, type) \ | 246 | #define FTRACE_CMP_TYPE(var, type) \ |
169 | __builtin_types_compatible_p(typeof(var), type *) | 247 | __builtin_types_compatible_p(typeof(var), type *) |
170 | 248 | ||
@@ -200,6 +278,7 @@ extern void __ftrace_bad_type(void); | |||
200 | IF_ASSIGN(var, ent, struct userstack_entry, TRACE_USER_STACK);\ | 278 | IF_ASSIGN(var, ent, struct userstack_entry, TRACE_USER_STACK);\ |
201 | IF_ASSIGN(var, ent, struct print_entry, TRACE_PRINT); \ | 279 | IF_ASSIGN(var, ent, struct print_entry, TRACE_PRINT); \ |
202 | IF_ASSIGN(var, ent, struct bprint_entry, TRACE_BPRINT); \ | 280 | IF_ASSIGN(var, ent, struct bprint_entry, TRACE_BPRINT); \ |
281 | IF_ASSIGN(var, ent, struct bputs_entry, TRACE_BPUTS); \ | ||
203 | IF_ASSIGN(var, ent, struct trace_mmiotrace_rw, \ | 282 | IF_ASSIGN(var, ent, struct trace_mmiotrace_rw, \ |
204 | TRACE_MMIO_RW); \ | 283 | TRACE_MMIO_RW); \ |
205 | IF_ASSIGN(var, ent, struct trace_mmiotrace_map, \ | 284 | IF_ASSIGN(var, ent, struct trace_mmiotrace_map, \ |
@@ -289,9 +368,10 @@ struct tracer { | |||
289 | struct tracer *next; | 368 | struct tracer *next; |
290 | struct tracer_flags *flags; | 369 | struct tracer_flags *flags; |
291 | bool print_max; | 370 | bool print_max; |
292 | bool use_max_tr; | ||
293 | bool allocated_snapshot; | ||
294 | bool enabled; | 371 | bool enabled; |
372 | #ifdef CONFIG_TRACER_MAX_TRACE | ||
373 | bool use_max_tr; | ||
374 | #endif | ||
295 | }; | 375 | }; |
296 | 376 | ||
297 | 377 | ||
@@ -427,8 +507,6 @@ static __always_inline void trace_clear_recursion(int bit) | |||
427 | current->trace_recursion = val; | 507 | current->trace_recursion = val; |
428 | } | 508 | } |
429 | 509 | ||
430 | #define TRACE_PIPE_ALL_CPU -1 | ||
431 | |||
432 | static inline struct ring_buffer_iter * | 510 | static inline struct ring_buffer_iter * |
433 | trace_buffer_iter(struct trace_iterator *iter, int cpu) | 511 | trace_buffer_iter(struct trace_iterator *iter, int cpu) |
434 | { | 512 | { |
@@ -439,10 +517,10 @@ trace_buffer_iter(struct trace_iterator *iter, int cpu) | |||
439 | 517 | ||
440 | int tracer_init(struct tracer *t, struct trace_array *tr); | 518 | int tracer_init(struct tracer *t, struct trace_array *tr); |
441 | int tracing_is_enabled(void); | 519 | int tracing_is_enabled(void); |
442 | void tracing_reset(struct trace_array *tr, int cpu); | 520 | void tracing_reset(struct trace_buffer *buf, int cpu); |
443 | void tracing_reset_online_cpus(struct trace_array *tr); | 521 | void tracing_reset_online_cpus(struct trace_buffer *buf); |
444 | void tracing_reset_current(int cpu); | 522 | void tracing_reset_current(int cpu); |
445 | void tracing_reset_current_online_cpus(void); | 523 | void tracing_reset_all_online_cpus(void); |
446 | int tracing_open_generic(struct inode *inode, struct file *filp); | 524 | int tracing_open_generic(struct inode *inode, struct file *filp); |
447 | struct dentry *trace_create_file(const char *name, | 525 | struct dentry *trace_create_file(const char *name, |
448 | umode_t mode, | 526 | umode_t mode, |
@@ -450,6 +528,7 @@ struct dentry *trace_create_file(const char *name, | |||
450 | void *data, | 528 | void *data, |
451 | const struct file_operations *fops); | 529 | const struct file_operations *fops); |
452 | 530 | ||
531 | struct dentry *tracing_init_dentry_tr(struct trace_array *tr); | ||
453 | struct dentry *tracing_init_dentry(void); | 532 | struct dentry *tracing_init_dentry(void); |
454 | 533 | ||
455 | struct ring_buffer_event; | 534 | struct ring_buffer_event; |
@@ -583,7 +662,7 @@ extern int DYN_FTRACE_TEST_NAME(void); | |||
583 | #define DYN_FTRACE_TEST_NAME2 trace_selftest_dynamic_test_func2 | 662 | #define DYN_FTRACE_TEST_NAME2 trace_selftest_dynamic_test_func2 |
584 | extern int DYN_FTRACE_TEST_NAME2(void); | 663 | extern int DYN_FTRACE_TEST_NAME2(void); |
585 | 664 | ||
586 | extern int ring_buffer_expanded; | 665 | extern bool ring_buffer_expanded; |
587 | extern bool tracing_selftest_disabled; | 666 | extern bool tracing_selftest_disabled; |
588 | DECLARE_PER_CPU(int, ftrace_cpu_disabled); | 667 | DECLARE_PER_CPU(int, ftrace_cpu_disabled); |
589 | 668 | ||
@@ -619,6 +698,8 @@ trace_array_vprintk(struct trace_array *tr, | |||
619 | unsigned long ip, const char *fmt, va_list args); | 698 | unsigned long ip, const char *fmt, va_list args); |
620 | int trace_array_printk(struct trace_array *tr, | 699 | int trace_array_printk(struct trace_array *tr, |
621 | unsigned long ip, const char *fmt, ...); | 700 | unsigned long ip, const char *fmt, ...); |
701 | int trace_array_printk_buf(struct ring_buffer *buffer, | ||
702 | unsigned long ip, const char *fmt, ...); | ||
622 | void trace_printk_seq(struct trace_seq *s); | 703 | void trace_printk_seq(struct trace_seq *s); |
623 | enum print_line_t print_trace_line(struct trace_iterator *iter); | 704 | enum print_line_t print_trace_line(struct trace_iterator *iter); |
624 | 705 | ||
@@ -786,6 +867,7 @@ enum trace_iterator_flags { | |||
786 | TRACE_ITER_STOP_ON_FREE = 0x400000, | 867 | TRACE_ITER_STOP_ON_FREE = 0x400000, |
787 | TRACE_ITER_IRQ_INFO = 0x800000, | 868 | TRACE_ITER_IRQ_INFO = 0x800000, |
788 | TRACE_ITER_MARKERS = 0x1000000, | 869 | TRACE_ITER_MARKERS = 0x1000000, |
870 | TRACE_ITER_FUNCTION = 0x2000000, | ||
789 | }; | 871 | }; |
790 | 872 | ||
791 | /* | 873 | /* |
@@ -832,8 +914,8 @@ enum { | |||
832 | 914 | ||
833 | struct ftrace_event_field { | 915 | struct ftrace_event_field { |
834 | struct list_head link; | 916 | struct list_head link; |
835 | char *name; | 917 | const char *name; |
836 | char *type; | 918 | const char *type; |
837 | int filter_type; | 919 | int filter_type; |
838 | int offset; | 920 | int offset; |
839 | int size; | 921 | int size; |
@@ -851,12 +933,19 @@ struct event_filter { | |||
851 | struct event_subsystem { | 933 | struct event_subsystem { |
852 | struct list_head list; | 934 | struct list_head list; |
853 | const char *name; | 935 | const char *name; |
854 | struct dentry *entry; | ||
855 | struct event_filter *filter; | 936 | struct event_filter *filter; |
856 | int nr_events; | ||
857 | int ref_count; | 937 | int ref_count; |
858 | }; | 938 | }; |
859 | 939 | ||
940 | struct ftrace_subsystem_dir { | ||
941 | struct list_head list; | ||
942 | struct event_subsystem *subsystem; | ||
943 | struct trace_array *tr; | ||
944 | struct dentry *entry; | ||
945 | int ref_count; | ||
946 | int nr_events; | ||
947 | }; | ||
948 | |||
860 | #define FILTER_PRED_INVALID ((unsigned short)-1) | 949 | #define FILTER_PRED_INVALID ((unsigned short)-1) |
861 | #define FILTER_PRED_IS_RIGHT (1 << 15) | 950 | #define FILTER_PRED_IS_RIGHT (1 << 15) |
862 | #define FILTER_PRED_FOLD (1 << 15) | 951 | #define FILTER_PRED_FOLD (1 << 15) |
@@ -906,22 +995,20 @@ struct filter_pred { | |||
906 | unsigned short right; | 995 | unsigned short right; |
907 | }; | 996 | }; |
908 | 997 | ||
909 | extern struct list_head ftrace_common_fields; | ||
910 | |||
911 | extern enum regex_type | 998 | extern enum regex_type |
912 | filter_parse_regex(char *buff, int len, char **search, int *not); | 999 | filter_parse_regex(char *buff, int len, char **search, int *not); |
913 | extern void print_event_filter(struct ftrace_event_call *call, | 1000 | extern void print_event_filter(struct ftrace_event_call *call, |
914 | struct trace_seq *s); | 1001 | struct trace_seq *s); |
915 | extern int apply_event_filter(struct ftrace_event_call *call, | 1002 | extern int apply_event_filter(struct ftrace_event_call *call, |
916 | char *filter_string); | 1003 | char *filter_string); |
917 | extern int apply_subsystem_event_filter(struct event_subsystem *system, | 1004 | extern int apply_subsystem_event_filter(struct ftrace_subsystem_dir *dir, |
918 | char *filter_string); | 1005 | char *filter_string); |
919 | extern void print_subsystem_event_filter(struct event_subsystem *system, | 1006 | extern void print_subsystem_event_filter(struct event_subsystem *system, |
920 | struct trace_seq *s); | 1007 | struct trace_seq *s); |
921 | extern int filter_assign_type(const char *type); | 1008 | extern int filter_assign_type(const char *type); |
922 | 1009 | ||
923 | struct list_head * | 1010 | struct ftrace_event_field * |
924 | trace_get_fields(struct ftrace_event_call *event_call); | 1011 | trace_find_event_field(struct ftrace_event_call *call, char *name); |
925 | 1012 | ||
926 | static inline int | 1013 | static inline int |
927 | filter_check_discard(struct ftrace_event_call *call, void *rec, | 1014 | filter_check_discard(struct ftrace_event_call *call, void *rec, |
@@ -938,6 +1025,8 @@ filter_check_discard(struct ftrace_event_call *call, void *rec, | |||
938 | } | 1025 | } |
939 | 1026 | ||
940 | extern void trace_event_enable_cmd_record(bool enable); | 1027 | extern void trace_event_enable_cmd_record(bool enable); |
1028 | extern int event_trace_add_tracer(struct dentry *parent, struct trace_array *tr); | ||
1029 | extern int event_trace_del_tracer(struct trace_array *tr); | ||
941 | 1030 | ||
942 | extern struct mutex event_mutex; | 1031 | extern struct mutex event_mutex; |
943 | extern struct list_head ftrace_events; | 1032 | extern struct list_head ftrace_events; |
@@ -948,7 +1037,18 @@ extern const char *__stop___trace_bprintk_fmt[]; | |||
948 | void trace_printk_init_buffers(void); | 1037 | void trace_printk_init_buffers(void); |
949 | void trace_printk_start_comm(void); | 1038 | void trace_printk_start_comm(void); |
950 | int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set); | 1039 | int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set); |
951 | int set_tracer_flag(unsigned int mask, int enabled); | 1040 | int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled); |
1041 | |||
1042 | /* | ||
1043 | * Normal trace_printk() and friends allocates special buffers | ||
1044 | * to do the manipulation, as well as saves the print formats | ||
1045 | * into sections to display. But the trace infrastructure wants | ||
1046 | * to use these without the added overhead at the price of being | ||
1047 | * a bit slower (used mainly for warnings, where we don't care | ||
1048 | * about performance). The internal_trace_puts() is for such | ||
1049 | * a purpose. | ||
1050 | */ | ||
1051 | #define internal_trace_puts(str) __trace_puts(_THIS_IP_, str, strlen(str)) | ||
952 | 1052 | ||
953 | #undef FTRACE_ENTRY | 1053 | #undef FTRACE_ENTRY |
954 | #define FTRACE_ENTRY(call, struct_name, id, tstruct, print, filter) \ | 1054 | #define FTRACE_ENTRY(call, struct_name, id, tstruct, print, filter) \ |