Diffstat (limited to 'kernel/trace/trace.h')
-rw-r--r--  kernel/trace/trace.h  342
1 file changed, 112 insertions(+), 230 deletions(-)
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index ea7e0bcbd539..91c3d0e9a5a1 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -7,10 +7,10 @@
 #include <linux/clocksource.h>
 #include <linux/ring_buffer.h>
 #include <linux/mmiotrace.h>
+#include <linux/tracepoint.h>
 #include <linux/ftrace.h>
 #include <trace/boot.h>
 #include <linux/kmemtrace.h>
-#include <trace/power.h>
 
 #include <linux/trace_seq.h>
 #include <linux/ftrace_event.h>
@@ -40,164 +40,60 @@ enum trace_type {
 	TRACE_HW_BRANCHES,
 	TRACE_KMEM_ALLOC,
 	TRACE_KMEM_FREE,
-	TRACE_POWER,
 	TRACE_BLK,
 	TRACE_KSYM,
 
 	__TRACE_LAST_TYPE,
 };
 
-/*
- * Function trace entry - function address and parent function addres:
- */
-struct ftrace_entry {
-	struct trace_entry	ent;
-	unsigned long		ip;
-	unsigned long		parent_ip;
-};
-
-/* Function call entry */
-struct ftrace_graph_ent_entry {
-	struct trace_entry		ent;
-	struct ftrace_graph_ent		graph_ent;
+enum kmemtrace_type_id {
+	KMEMTRACE_TYPE_KMALLOC = 0,	/* kmalloc() or kfree(). */
+	KMEMTRACE_TYPE_CACHE,		/* kmem_cache_*(). */
+	KMEMTRACE_TYPE_PAGES,		/* __get_free_pages() and friends. */
 };
 
-/* Function return entry */
-struct ftrace_graph_ret_entry {
-	struct trace_entry		ent;
-	struct ftrace_graph_ret		ret;
-};
 extern struct tracer boot_tracer;
 
-/*
- * Context switch trace entry - which task (and prio) we switched from/to:
- */
-struct ctx_switch_entry {
-	struct trace_entry	ent;
-	unsigned int		prev_pid;
-	unsigned char		prev_prio;
-	unsigned char		prev_state;
-	unsigned int		next_pid;
-	unsigned char		next_prio;
-	unsigned char		next_state;
-	unsigned int		next_cpu;
-};
-
-/*
- * Special (free-form) trace entry:
- */
-struct special_entry {
-	struct trace_entry	ent;
-	unsigned long		arg1;
-	unsigned long		arg2;
-	unsigned long		arg3;
-};
-
-/*
- * Stack-trace entry:
- */
-
-#define FTRACE_STACK_ENTRIES	8
-
-struct stack_entry {
-	struct trace_entry	ent;
-	unsigned long		caller[FTRACE_STACK_ENTRIES];
-};
-
-struct userstack_entry {
-	struct trace_entry	ent;
-	unsigned long		caller[FTRACE_STACK_ENTRIES];
-};
-
-/*
- * trace_printk entry:
- */
-struct bprint_entry {
-	struct trace_entry	ent;
-	unsigned long		ip;
-	const char		*fmt;
-	u32			buf[];
-};
-
-struct print_entry {
-	struct trace_entry	ent;
-	unsigned long		ip;
-	char			buf[];
-};
-
-#define TRACE_OLD_SIZE		88
-
-struct trace_field_cont {
-	unsigned char		type;
-	/* Temporary till we get rid of this completely */
-	char			buf[TRACE_OLD_SIZE - 1];
-};
+#undef __field
+#define __field(type, item)		type	item;
 
-struct trace_mmiotrace_rw {
-	struct trace_entry	ent;
-	struct mmiotrace_rw	rw;
-};
+#undef __field_struct
+#define __field_struct(type, item)	__field(type, item)
 
-struct trace_mmiotrace_map {
-	struct trace_entry	ent;
-	struct mmiotrace_map	map;
-};
+#undef __field_desc
+#define __field_desc(type, container, item)
 
-struct trace_boot_call {
-	struct trace_entry	ent;
-	struct boot_trace_call boot_call;
-};
+#undef __array
+#define __array(type, item, size)	type	item[size];
 
-struct trace_boot_ret {
-	struct trace_entry	ent;
-	struct boot_trace_ret boot_ret;
-};
+#undef __array_desc
+#define __array_desc(type, container, item, size)
 
-#define TRACE_FUNC_SIZE 30
-#define TRACE_FILE_SIZE 20
-struct trace_branch {
-	struct trace_entry	ent;
-	unsigned		line;
-	char			func[TRACE_FUNC_SIZE+1];
-	char			file[TRACE_FILE_SIZE+1];
-	char			correct;
-};
+#undef __dynamic_array
+#define __dynamic_array(type, item)	type	item[];
 
-struct hw_branch_entry {
-	struct trace_entry	ent;
-	u64			from;
-	u64			to;
-};
+#undef F_STRUCT
+#define F_STRUCT(args...)		args
 
-struct trace_power {
-	struct trace_entry	ent;
-	struct power_trace	state_data;
-};
+#undef FTRACE_ENTRY
+#define FTRACE_ENTRY(name, struct_name, id, tstruct, print)	\
+	struct struct_name {					\
+		struct trace_entry	ent;			\
+		tstruct						\
+	}
 
-enum kmemtrace_type_id {
-	KMEMTRACE_TYPE_KMALLOC = 0,	/* kmalloc() or kfree(). */
-	KMEMTRACE_TYPE_CACHE,		/* kmem_cache_*(). */
-	KMEMTRACE_TYPE_PAGES,		/* __get_free_pages() and friends. */
-};
+#undef TP_ARGS
+#define TP_ARGS(args...)	args
 
-struct kmemtrace_alloc_entry {
-	struct trace_entry	ent;
-	enum kmemtrace_type_id	type_id;
-	unsigned long		call_site;
-	const void		*ptr;
-	size_t			bytes_req;
-	size_t			bytes_alloc;
-	gfp_t			gfp_flags;
-	int			node;
-};
+#undef FTRACE_ENTRY_DUP
+#define FTRACE_ENTRY_DUP(name, name_struct, id, tstruct, printk)
 
-struct kmemtrace_free_entry {
-	struct trace_entry	ent;
-	enum kmemtrace_type_id	type_id;
-	unsigned long		call_site;
-	const void		*ptr;
-};
+#include "trace_entries.h"
 
+/*
+ * syscalls are special, and need special handling, this is why
+ * they are not included in trace_entries.h
+ */
 struct syscall_trace_enter {
 	struct trace_entry	ent;
 	int			nr;
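With the __field()/__array()/F_STRUCT()/FTRACE_ENTRY() macros introduced above, each ring-buffer entry is described once in trace_entries.h and expanded into a struct here instead of being spelled out by hand. A minimal sketch of that expansion, assuming trace_entries.h describes the function-trace entry roughly as follows (the exact invocation, and the F_printk() argument it takes, are not part of this diff):

	/* Assumed shape of a trace_entries.h entry; illustrative only. */
	FTRACE_ENTRY(function, ftrace_entry, TRACE_FN,

		F_STRUCT(
			__field(	unsigned long,	ip		)
			__field(	unsigned long,	parent_ip	)
		),

		F_printk(" %lx <-- %lx", __entry->ip, __entry->parent_ip)
	);

	/*
	 * With __field() and F_STRUCT() defined as above, the tstruct argument
	 * expands field by field, so the macro reproduces the struct that the
	 * hunk above removes from the header:
	 *
	 *	struct ftrace_entry {
	 *		struct trace_entry	ent;
	 *		unsigned long		ip;
	 *		unsigned long		parent_ip;
	 *	};
	 */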
@@ -210,23 +106,12 @@ struct syscall_trace_exit {
 	unsigned long		ret;
 };
 
-#define KSYM_SELFTEST_ENTRY "ksym_selftest_dummy"
-extern int process_new_ksym_entry(char *ksymname, int op, unsigned long addr);
-
-struct ksym_trace_entry {
-	struct trace_entry	ent;
-	unsigned long		ip;
-	unsigned char		type;
-	char			ksym_name[KSYM_NAME_LEN];
-	char			cmd[TASK_COMM_LEN];
-};
-
 /*
  * trace_flag_type is an enumeration that holds different
  * states when a trace occurs. These are:
  *  IRQS_OFF		- interrupts were disabled
  *  IRQS_NOSUPPORT	- arch does not support irqs_disabled_flags
- *  NEED_RESCED		- reschedule is requested
+ *  NEED_RESCHED	- reschedule is requested
  *  HARDIRQ		- inside an interrupt handler
  *  SOFTIRQ		- inside a softirq handler
  */
@@ -325,7 +210,6 @@ extern void __ftrace_bad_type(void);
 		IF_ASSIGN(var, ent, struct ftrace_graph_ret_entry,	\
 			  TRACE_GRAPH_RET);		\
 		IF_ASSIGN(var, ent, struct hw_branch_entry, TRACE_HW_BRANCHES);\
-		IF_ASSIGN(var, ent, struct trace_power, TRACE_POWER); \
 		IF_ASSIGN(var, ent, struct kmemtrace_alloc_entry,	\
 			  TRACE_KMEM_ALLOC);	\
 		IF_ASSIGN(var, ent, struct kmemtrace_free_entry,	\
@@ -406,7 +290,6 @@ struct tracer {
 	struct tracer		*next;
 	int			print_max;
 	struct tracer_flags 	*flags;
-	struct tracer_stat	*stats;
 };
 
 
@@ -485,6 +368,10 @@ void tracing_stop_sched_switch_record(void);
 void tracing_start_sched_switch_record(void);
 int register_tracer(struct tracer *type);
 void unregister_tracer(struct tracer *type);
+int is_tracing_stopped(void);
+
+#define KSYM_SELFTEST_ENTRY "ksym_selftest_dummy"
+extern int process_new_ksym_entry(char *ksymname, int op, unsigned long addr);
 
 extern unsigned long nsecs_to_usecs(unsigned long nsecs);
 
@@ -525,20 +412,6 @@ static inline void __trace_stack(struct trace_array *tr, unsigned long flags,
 
 extern cycle_t ftrace_now(int cpu);
 
-#ifdef CONFIG_CONTEXT_SWITCH_TRACER
-typedef void
-(*tracer_switch_func_t)(void *private,
-			void *__rq,
-			struct task_struct *prev,
-			struct task_struct *next);
-
-struct tracer_switch_ops {
-	tracer_switch_func_t		func;
-	void				*private;
-	struct tracer_switch_ops	*next;
-};
-#endif /* CONFIG_CONTEXT_SWITCH_TRACER */
-
 extern void trace_find_cmdline(int pid, char comm[]);
 
 #ifdef CONFIG_DYNAMIC_FTRACE
@@ -621,10 +494,6 @@ static inline int ftrace_graph_addr(unsigned long addr)
 	return 0;
 }
 #else
-static inline int ftrace_trace_addr(unsigned long addr)
-{
-	return 1;
-}
 static inline int ftrace_graph_addr(unsigned long addr)
 {
 	return 1;
@@ -638,12 +507,12 @@ print_graph_function(struct trace_iterator *iter)
 }
 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
 
-extern struct pid *ftrace_pid_trace;
+extern struct list_head ftrace_pids;
 
 #ifdef CONFIG_FUNCTION_TRACER
 static inline int ftrace_trace_task(struct task_struct *task)
 {
-	if (!ftrace_pid_trace)
+	if (list_empty(&ftrace_pids))
 		return 1;
 
 	return test_tsk_trace_trace(task);
@@ -656,6 +525,41 @@ static inline int ftrace_trace_task(struct task_struct *task)
 #endif
 
 /*
+ * struct trace_parser - servers for reading the user input separated by spaces
+ * @cont: set if the input is not complete - no final space char was found
+ * @buffer: holds the parsed user input
+ * @idx: user input lenght
+ * @size: buffer size
+ */
+struct trace_parser {
+	bool		cont;
+	char		*buffer;
+	unsigned	idx;
+	unsigned	size;
+};
+
+static inline bool trace_parser_loaded(struct trace_parser *parser)
+{
+	return (parser->idx != 0);
+}
+
+static inline bool trace_parser_cont(struct trace_parser *parser)
+{
+	return parser->cont;
+}
+
+static inline void trace_parser_clear(struct trace_parser *parser)
+{
+	parser->cont = false;
+	parser->idx = 0;
+}
+
+extern int trace_parser_get_init(struct trace_parser *parser, int size);
+extern void trace_parser_put(struct trace_parser *parser);
+extern int trace_get_user(struct trace_parser *parser, const char __user *ubuf,
+	size_t cnt, loff_t *ppos);
+
+/*
  * trace_iterator_flags is an enumeration that defines bit
  * positions into trace_flags that controls the output.
  *
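The trace_parser helpers added above package the common chore of pulling one space-separated token out of a user-space write(). A sketch of how a write handler might drive them, using only the API declared here (the handler itself and the 64-byte buffer size are illustrative, not something this patch adds):

	/* Illustrative write handler: consumes one whitespace-separated token. */
	static ssize_t example_write(struct file *filp, const char __user *ubuf,
				     size_t cnt, loff_t *ppos)
	{
		struct trace_parser parser;
		ssize_t read;

		if (trace_parser_get_init(&parser, 64))	/* set up a token buffer */
			return -ENOMEM;

		read = trace_get_user(&parser, ubuf, cnt, ppos);

		/* Only act once a complete token has been terminated. */
		if (read >= 0 && trace_parser_loaded(&parser) &&
		    !trace_parser_cont(&parser)) {
			parser.buffer[parser.idx] = 0;	/* NUL-terminate the token */
			/* ... handle the token in parser.buffer ... */
		}

		trace_parser_put(&parser);
		return read;
	}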
@@ -790,7 +694,6 @@ struct event_filter {
 	int			n_preds;
 	struct filter_pred	**preds;
 	char			*filter_string;
-	bool			no_reset;
 };
 
 struct event_subsystem {
@@ -802,22 +705,40 @@ struct event_subsystem {
 };
 
 struct filter_pred;
+struct regex;
 
 typedef int (*filter_pred_fn_t) (struct filter_pred *pred, void *event,
 				 int val1, int val2);
 
-struct filter_pred {
-	filter_pred_fn_t 	fn;
-	u64 			val;
-	char 			str_val[MAX_FILTER_STR_VAL];
-	int 			str_len;
-	char 			*field_name;
-	int 			offset;
-	int 			not;
-	int 			op;
-	int 			pop_n;
+typedef int (*regex_match_func)(char *str, struct regex *r, int len);
+
+enum regex_type {
+	MATCH_FULL = 0,
+	MATCH_FRONT_ONLY,
+	MATCH_MIDDLE_ONLY,
+	MATCH_END_ONLY,
+};
+
+struct regex {
+	char		pattern[MAX_FILTER_STR_VAL];
+	int		len;
+	int		field_len;
+	regex_match_func	match;
 };
 
+struct filter_pred {
+	filter_pred_fn_t 	fn;
+	u64 			val;
+	struct regex		regex;
+	char 			*field_name;
+	int 			offset;
+	int 			not;
+	int 			op;
+	int 			pop_n;
+};
+
+extern enum regex_type
+filter_parse_regex(char *buff, int len, char **search, int *not);
 extern void print_event_filter(struct ftrace_event_call *call,
 			       struct trace_seq *s);
 extern int apply_event_filter(struct ftrace_event_call *call,
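The struct regex added here is only a glob-style matcher; the enum names describe where in the candidate string the pattern has to sit. An illustrative matcher written against those names (the kernel's real match functions and filter_parse_regex() live in trace_events_filter.c and are not shown in this diff):

	/* Illustrative only: what each regex_type is meant to express. */
	static int example_match(enum regex_type type, struct regex *r,
				 char *str, int len)
	{
		switch (type) {
		case MATCH_FULL:		/* whole string equals the pattern */
			return strcmp(str, r->pattern) == 0;
		case MATCH_FRONT_ONLY:		/* pattern anchored at the start */
			return strncmp(str, r->pattern, r->len) == 0;
		case MATCH_MIDDLE_ONLY:		/* pattern anywhere in the string */
			return strstr(str, r->pattern) != NULL;
		case MATCH_END_ONLY:		/* pattern anchored at the end */
			return len >= r->len &&
			       memcmp(str + len - r->len, r->pattern, r->len) == 0;
		}
		return 0;
	}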
@@ -833,7 +754,8 @@ filter_check_discard(struct ftrace_event_call *call, void *rec,
 				 struct ring_buffer *buffer,
 				 struct ring_buffer_event *event)
 {
-	if (unlikely(call->filter_active) && !filter_match_preds(call, rec)) {
+	if (unlikely(call->filter_active) &&
+	    !filter_match_preds(call->filter, rec)) {
 		ring_buffer_discard_commit(buffer, event);
 		return 1;
 	}
@@ -841,58 +763,18 @@ filter_check_discard(struct ftrace_event_call *call, void *rec,
 	return 0;
 }
 
-#define DEFINE_COMPARISON_PRED(type)					\
-static int filter_pred_##type(struct filter_pred *pred, void *event,	\
-			      int val1, int val2)			\
-{									\
-	type *addr = (type *)(event + pred->offset);			\
-	type val = (type)pred->val;					\
-	int match = 0;							\
-									\
-	switch (pred->op) {						\
-	case OP_LT:							\
-		match = (*addr < val);					\
-		break;							\
-	case OP_LE:							\
-		match = (*addr <= val);					\
-		break;							\
-	case OP_GT:							\
-		match = (*addr > val);					\
-		break;							\
-	case OP_GE:							\
-		match = (*addr >= val);					\
-		break;							\
-	default:							\
-		break;							\
-	}								\
-									\
-	return match;							\
-}
-
-#define DEFINE_EQUALITY_PRED(size)					\
-static int filter_pred_##size(struct filter_pred *pred, void *event,	\
-			      int val1, int val2)			\
-{									\
-	u##size *addr = (u##size *)(event + pred->offset);		\
-	u##size val = (u##size)pred->val;				\
-	int match;							\
-									\
-	match = (val == *addr) ^ pred->not;				\
-									\
-	return match;							\
-}
-
 extern struct mutex event_mutex;
 extern struct list_head ftrace_events;
 
 extern const char *__start___trace_bprintk_fmt[];
 extern const char *__stop___trace_bprintk_fmt[];
 
-#undef TRACE_EVENT_FORMAT
-#define TRACE_EVENT_FORMAT(call, proto, args, fmt, tstruct, tpfmt)	\
+#undef FTRACE_ENTRY
+#define FTRACE_ENTRY(call, struct_name, id, tstruct, print)		\
 	extern struct ftrace_event_call event_##call;
-#undef TRACE_EVENT_FORMAT_NOFILTER
-#define TRACE_EVENT_FORMAT_NOFILTER(call, proto, args, fmt, tstruct, tpfmt)
-#include "trace_event_types.h"
+#undef FTRACE_ENTRY_DUP
+#define FTRACE_ENTRY_DUP(call, struct_name, id, tstruct, print)	\
+	FTRACE_ENTRY(call, struct_name, id, PARAMS(tstruct), PARAMS(print))
+#include "trace_entries.h"
 
 #endif /* _LINUX_KERNEL_TRACE_H */
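At the bottom of the header the FTRACE_ENTRY()/FTRACE_ENTRY_DUP() pair is redefined once more before trace_entries.h is included a second time, so the same entry descriptions now emit one extern declaration per event instead of a struct. A sketch of the net effect, assuming trace_entries.h contains an entry named "function" (the entry names themselves are not visible in this diff):

	/* With the redefinition above, an entry such as			*/
	/*	FTRACE_ENTRY(function, ftrace_entry, TRACE_FN, ..., ...)	*/
	/* now expands to nothing more than:					*/
	extern struct ftrace_event_call event_function;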