-rw-r--r--  include/linux/ftrace_event.h         |   3
-rw-r--r--  include/linux/syscalls.h             |   6
-rw-r--r--  include/trace/ftrace.h               | 192
-rw-r--r--  include/trace/syscall.h              |   4
-rw-r--r--  kernel/trace/ftrace.c                |  51
-rw-r--r--  kernel/trace/trace.c                 | 144
-rw-r--r--  kernel/trace/trace.h                 |   3
-rw-r--r--  kernel/trace/trace_branch.c          |  19
-rw-r--r--  kernel/trace/trace_events.c          |  77
-rw-r--r--  kernel/trace/trace_export.c          |  87
-rw-r--r--  kernel/trace/trace_functions_graph.c |  27
-rw-r--r--  kernel/trace/trace_kprobe.c          | 104
-rw-r--r--  kernel/trace/trace_syscalls.c        | 106
-rwxr-xr-x  scripts/recordmcount.pl              |  39
14 files changed, 388 insertions(+), 474 deletions(-)
diff --git a/include/linux/ftrace_event.h b/include/linux/ftrace_event.h
index 2233c98d80df..84a5629adfd8 100644
--- a/include/linux/ftrace_event.h
+++ b/include/linux/ftrace_event.h
@@ -121,9 +121,8 @@ struct ftrace_event_call {
 	int (*regfunc)(struct ftrace_event_call *);
 	void (*unregfunc)(struct ftrace_event_call *);
 	int id;
+	const char *print_fmt;
 	int (*raw_init)(struct ftrace_event_call *);
-	int (*show_format)(struct ftrace_event_call *,
-			   struct trace_seq *);
 	int (*define_fields)(struct ftrace_event_call *);
 	struct list_head fields;
 	int filter_active;
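
The struct change above replaces the per-event show_format() callback with a plain string: once the format is data rather than code, one generic routine can print it for every event. A minimal userspace sketch of the idea (not kernel code; the struct and names below are invented for illustration):

    #include <stdio.h>

    struct my_event {
        const char *name;
        /* after this patch the format is plain data ... */
        const char *print_fmt;
    };

    static struct my_event ev = {
        .name      = "sample",
        .print_fmt = "\"count=%d\", REC->count",
    };

    int main(void)
    {
        /* ... so dumping it needs no per-event callback */
        printf("print fmt: %s\n", ev.print_fmt);
        return 0;
    }
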
diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h
index 207466a49f3d..7b219696ad24 100644
--- a/include/linux/syscalls.h
+++ b/include/linux/syscalls.h
@@ -143,8 +143,7 @@ struct perf_event_attr;
 	.name = "sys_enter"#sname,			\
 	.system = "syscalls",				\
 	.event = &enter_syscall_print_##sname,		\
-	.raw_init = trace_event_raw_init,		\
-	.show_format = syscall_enter_format,		\
+	.raw_init = init_syscall_trace,			\
 	.define_fields = syscall_enter_define_fields,	\
 	.regfunc = reg_event_syscall_enter,		\
 	.unregfunc = unreg_event_syscall_enter,		\
@@ -165,8 +164,7 @@ struct perf_event_attr;
 	.name = "sys_exit"#sname,			\
 	.system = "syscalls",				\
 	.event = &exit_syscall_print_##sname,		\
-	.raw_init = trace_event_raw_init,		\
-	.show_format = syscall_exit_format,		\
+	.raw_init = init_syscall_trace,			\
 	.define_fields = syscall_exit_define_fields,	\
 	.regfunc = reg_event_syscall_exit,		\
 	.unregfunc = unreg_event_syscall_exit,		\
diff --git a/include/trace/ftrace.h b/include/trace/ftrace.h
index c6fe03e902ca..09fd9afc0859 100644
--- a/include/trace/ftrace.h
+++ b/include/trace/ftrace.h
@@ -131,130 +131,6 @@
 #include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
 
 /*
- * Setup the showing format of trace point.
- *
- * int
- * ftrace_format_##call(struct trace_seq *s)
- * {
- *	struct ftrace_raw_##call field;
- *	int ret;
- *
- *	ret = trace_seq_printf(s, #type " " #item ";"
- *			       " offset:%u; size:%u;\n",
- *			       offsetof(struct ftrace_raw_##call, item),
- *			       sizeof(field.type));
- *
- * }
- */
-
-#undef TP_STRUCT__entry
-#define TP_STRUCT__entry(args...) args
-
-#undef __field
-#define __field(type, item)						\
-	ret = trace_seq_printf(s, "\tfield:" #type " " #item ";\t"	\
-			       "offset:%u;\tsize:%u;\tsigned:%u;\n",	\
-			       (unsigned int)offsetof(typeof(field), item), \
-			       (unsigned int)sizeof(field.item),	\
-			       (unsigned int)is_signed_type(type));	\
-	if (!ret)							\
-		return 0;
-
-#undef __field_ext
-#define __field_ext(type, item, filter_type) __field(type, item)
-
-#undef __array
-#define __array(type, item, len)					\
-	ret = trace_seq_printf(s, "\tfield:" #type " " #item "[" #len "];\t" \
-			       "offset:%u;\tsize:%u;\tsigned:%u;\n",	\
-			       (unsigned int)offsetof(typeof(field), item), \
-			       (unsigned int)sizeof(field.item),	\
-			       (unsigned int)is_signed_type(type));	\
-	if (!ret)							\
-		return 0;
-
-#undef __dynamic_array
-#define __dynamic_array(type, item, len)				\
-	ret = trace_seq_printf(s, "\tfield:__data_loc " #type "[] " #item ";\t"\
-			       "offset:%u;\tsize:%u;\tsigned:%u;\n",	\
-			       (unsigned int)offsetof(typeof(field),	\
-					__data_loc_##item),		\
-			       (unsigned int)sizeof(field.__data_loc_##item), \
-			       (unsigned int)is_signed_type(type));	\
-	if (!ret)							\
-		return 0;
-
-#undef __string
-#define __string(item, src) __dynamic_array(char, item, -1)
-
-#undef __entry
-#define __entry REC
-
-#undef __print_symbolic
-#undef __get_dynamic_array
-#undef __get_str
-
-#undef TP_printk
-#define TP_printk(fmt, args...) "\"%s\", %s\n", fmt, __stringify(args)
-
-#undef TP_fast_assign
-#define TP_fast_assign(args...) args
-
-#undef TP_perf_assign
-#define TP_perf_assign(args...)
-
-#undef DECLARE_EVENT_CLASS
-#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, func, print)	\
-static int								\
-ftrace_format_setup_##call(struct ftrace_event_call *unused,		\
-			   struct trace_seq *s)				\
-{									\
-	struct ftrace_raw_##call field __attribute__((unused));	\
-	int ret = 0;							\
-									\
-	tstruct;							\
-									\
-	return ret;							\
-}									\
-									\
-static int								\
-ftrace_format_##call(struct ftrace_event_call *unused,			\
-		     struct trace_seq *s)				\
-{									\
-	int ret = 0;							\
-									\
-	ret = ftrace_format_setup_##call(unused, s);			\
-	if (!ret)							\
-		return ret;						\
-									\
-	ret = trace_seq_printf(s, "\nprint fmt: " print);		\
-									\
-	return ret;							\
-}
-
-#undef DEFINE_EVENT
-#define DEFINE_EVENT(template, name, proto, args)
-
-#undef DEFINE_EVENT_PRINT
-#define DEFINE_EVENT_PRINT(template, name, proto, args, print)		\
-static int								\
-ftrace_format_##name(struct ftrace_event_call *unused,			\
-		     struct trace_seq *s)				\
-{									\
-	int ret = 0;							\
-									\
-	ret = ftrace_format_setup_##template(unused, s);		\
-	if (!ret)							\
-		return ret;						\
-									\
-	trace_seq_printf(s, "\nprint fmt: " print);			\
-									\
-	return ret;							\
-}
-
-#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
-
-/*
  * Stage 3 of the trace events.
  *
  * Override the macros in <trace/trace_events.h> to include the following:
@@ -323,7 +199,7 @@ ftrace_format_##name(struct ftrace_event_call *unused, \
 
 #undef DECLARE_EVENT_CLASS
 #define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print)	\
-static enum print_line_t						\
+static notrace enum print_line_t					\
 ftrace_raw_output_id_##call(int event_id, const char *name,		\
 			    struct trace_iterator *iter, int flags)	\
 {									\
@@ -356,7 +232,7 @@ ftrace_raw_output_id_##call(int event_id, const char *name, \
 
 #undef DEFINE_EVENT
 #define DEFINE_EVENT(template, name, proto, args)			\
-static enum print_line_t						\
+static notrace enum print_line_t					\
 ftrace_raw_output_##name(struct trace_iterator *iter, int flags)	\
 {									\
 	return ftrace_raw_output_id_##template(event_##name.id,	\
@@ -365,7 +241,7 @@ ftrace_raw_output_##name(struct trace_iterator *iter, int flags) \
 
 #undef DEFINE_EVENT_PRINT
 #define DEFINE_EVENT_PRINT(template, call, proto, args, print)		\
-static enum print_line_t						\
+static notrace enum print_line_t					\
 ftrace_raw_output_##call(struct trace_iterator *iter, int flags)	\
 {									\
 	struct trace_seq *s = &iter->seq;				\
@@ -431,7 +307,7 @@ ftrace_raw_output_##call(struct trace_iterator *iter, int flags) \
 
 #undef DECLARE_EVENT_CLASS
 #define DECLARE_EVENT_CLASS(call, proto, args, tstruct, func, print)	\
-static int								\
+static int notrace							\
 ftrace_define_fields_##call(struct ftrace_event_call *event_call)	\
 {									\
 	struct ftrace_raw_##call field;					\
@@ -479,7 +355,7 @@ ftrace_define_fields_##call(struct ftrace_event_call *event_call) \
 
 #undef DECLARE_EVENT_CLASS
 #define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print)	\
-static inline int ftrace_get_offsets_##call(				\
+static inline notrace int ftrace_get_offsets_##call(			\
 	struct ftrace_data_offsets_##call *__data_offsets, proto)	\
 {									\
 	int __data_size = 0;						\
@@ -526,12 +402,14 @@ static inline int ftrace_get_offsets_##call( \
 									\
 static void ftrace_profile_##name(proto);				\
 									\
-static int ftrace_profile_enable_##name(struct ftrace_event_call *unused)\
+static notrace int							\
+ftrace_profile_enable_##name(struct ftrace_event_call *unused)		\
 {									\
 	return register_trace_##name(ftrace_profile_##name);		\
 }									\
 									\
-static void ftrace_profile_disable_##name(struct ftrace_event_call *unused)\
+static notrace void							\
+ftrace_profile_disable_##name(struct ftrace_event_call *unused)		\
 {									\
 	unregister_trace_##name(ftrace_profile_##name);			\
 }
@@ -622,7 +500,6 @@ static void ftrace_profile_disable_##name(struct ftrace_event_call *unused)\
  *	.raw_init = trace_event_raw_init,
  *	.regfunc = ftrace_reg_event_<call>,
  *	.unregfunc = ftrace_unreg_event_<call>,
- *	.show_format = ftrace_format_<call>,
  * }
  *
  */
@@ -657,10 +534,17 @@ static void ftrace_profile_disable_##name(struct ftrace_event_call *unused)\
 #define __assign_str(dst, src)						\
 	strcpy(__get_str(dst), src);
 
+#undef TP_fast_assign
+#define TP_fast_assign(args...) args
+
+#undef TP_perf_assign
+#define TP_perf_assign(args...)
+
 #undef DECLARE_EVENT_CLASS
 #define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print)	\
 									\
-static void ftrace_raw_event_id_##call(struct ftrace_event_call *event_call, \
+static notrace void							\
+ftrace_raw_event_id_##call(struct ftrace_event_call *event_call,	\
 			   proto)					\
 {									\
 	struct ftrace_data_offsets_##call __maybe_unused __data_offsets;\
@@ -697,17 +581,19 @@ static void ftrace_raw_event_id_##call(struct ftrace_event_call *event_call, \
 #undef DEFINE_EVENT
 #define DEFINE_EVENT(template, call, proto, args)			\
 									\
-static void ftrace_raw_event_##call(proto)				\
+static notrace void ftrace_raw_event_##call(proto)			\
 {									\
 	ftrace_raw_event_id_##template(&event_##call, args);		\
 }									\
 									\
-static int ftrace_raw_reg_event_##call(struct ftrace_event_call *unused)\
+static notrace int							\
+ftrace_raw_reg_event_##call(struct ftrace_event_call *unused)		\
 {									\
 	return register_trace_##call(ftrace_raw_event_##call);		\
 }									\
 									\
-static void ftrace_raw_unreg_event_##call(struct ftrace_event_call *unused)\
+static notrace void							\
+ftrace_raw_unreg_event_##call(struct ftrace_event_call *unused)	\
 {									\
 	unregister_trace_##call(ftrace_raw_event_##call);		\
 }									\
@@ -722,8 +608,20 @@ static struct trace_event ftrace_event_type_##call = { \
 
 #include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
 
+#undef __entry
+#define __entry REC
+
+#undef __print_flags
+#undef __print_symbolic
+#undef __get_dynamic_array
+#undef __get_str
+
+#undef TP_printk
+#define TP_printk(fmt, args...) "\"" fmt "\", " __stringify(args)
+
 #undef DECLARE_EVENT_CLASS
-#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print)
+#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print)	\
+static const char print_fmt_##call[] = print;
 
 #undef DEFINE_EVENT
 #define DEFINE_EVENT(template, call, proto, args)			\
@@ -737,7 +635,7 @@ __attribute__((section("_ftrace_events"))) event_##call = { \
 	.raw_init = trace_event_raw_init,				\
 	.regfunc = ftrace_raw_reg_event_##call,				\
 	.unregfunc = ftrace_raw_unreg_event_##call,			\
-	.show_format = ftrace_format_##template,			\
+	.print_fmt = print_fmt_##template,				\
 	.define_fields = ftrace_define_fields_##template,		\
 	_TRACE_PROFILE_INIT(call)					\
 }
@@ -745,6 +643,8 @@ __attribute__((section("_ftrace_events"))) event_##call = { \
 #undef DEFINE_EVENT_PRINT
 #define DEFINE_EVENT_PRINT(template, call, proto, args, print)		\
 									\
+static const char print_fmt_##call[] = print;				\
+									\
 static struct ftrace_event_call __used					\
 __attribute__((__aligned__(4)))						\
 __attribute__((section("_ftrace_events"))) event_##call = {		\
@@ -754,7 +654,7 @@ __attribute__((section("_ftrace_events"))) event_##call = { \
 	.raw_init = trace_event_raw_init,				\
 	.regfunc = ftrace_raw_reg_event_##call,				\
 	.unregfunc = ftrace_raw_unreg_event_##call,			\
-	.show_format = ftrace_format_##call,				\
+	.print_fmt = print_fmt_##call,					\
 	.define_fields = ftrace_define_fields_##template,		\
 	_TRACE_PROFILE_INIT(call)					\
 }
@@ -837,6 +737,16 @@ __attribute__((section("_ftrace_events"))) event_##call = { \
 
 #ifdef CONFIG_EVENT_PROFILE
 
+#undef __entry
+#define __entry entry
+
+#undef __get_dynamic_array
+#define __get_dynamic_array(field)	\
+		((void *)__entry + (__entry->__data_loc_##field & 0xffff))
+
+#undef __get_str
+#define __get_str(field) (char *)__get_dynamic_array(field)
+
 #undef __perf_addr
 #define __perf_addr(a) __addr = (a)
 
@@ -845,7 +755,7 @@ __attribute__((section("_ftrace_events"))) event_##call = { \
 
 #undef DECLARE_EVENT_CLASS
 #define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print)	\
-static void								\
+static notrace void							\
 ftrace_profile_templ_##call(struct ftrace_event_call *event_call,	\
 			    proto)					\
 {									\
@@ -915,7 +825,7 @@ end_recursion: \
 
 #undef DEFINE_EVENT
 #define DEFINE_EVENT(template, call, proto, args)			\
-static void ftrace_profile_##call(proto)				\
+static notrace void ftrace_profile_##call(proto)			\
 {									\
 	struct ftrace_event_call *event_call = &event_##call;		\
 									\
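
The TP_printk override added above builds the format string at compile time: it quotes the printk format and stringifies the argument list, and DECLARE_EVENT_CLASS then stores the result as print_fmt_##call. A self-contained sketch of that stringification (GNU C preprocessor, as the kernel uses; REC is only stringified, never evaluated):

    #include <stdio.h>

    #define __stringify_1(x...) #x
    #define __stringify(x...)   __stringify_1(x)

    /* mirrors the TP_printk override in this patch */
    #define TP_printk(fmt, args...) "\"" fmt "\", " __stringify(args)

    static const char print_fmt_sample[] = TP_printk("count=%d", REC->count);

    int main(void)
    {
        puts(print_fmt_sample); /* prints: "count=%d", REC->count */
        return 0;
    }
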
diff --git a/include/trace/syscall.h b/include/trace/syscall.h
index 961fda3556bb..8cd410254456 100644
--- a/include/trace/syscall.h
+++ b/include/trace/syscall.h
@@ -34,10 +34,6 @@ struct syscall_metadata {
 extern unsigned long arch_syscall_addr(int nr);
 extern int init_syscall_trace(struct ftrace_event_call *call);
 
-extern int syscall_enter_format(struct ftrace_event_call *call,
-				struct trace_seq *s);
-extern int syscall_exit_format(struct ftrace_event_call *call,
-			       struct trace_seq *s);
 extern int syscall_enter_define_fields(struct ftrace_event_call *call);
 extern int syscall_exit_define_fields(struct ftrace_event_call *call);
 extern int reg_event_syscall_enter(struct ftrace_event_call *call);
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 1e6640f80454..d996353473fd 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -2426,6 +2426,7 @@ static const struct file_operations ftrace_notrace_fops = {
 static DEFINE_MUTEX(graph_lock);
 
 int ftrace_graph_count;
+int ftrace_graph_filter_enabled;
 unsigned long ftrace_graph_funcs[FTRACE_GRAPH_MAX_FUNCS] __read_mostly;
 
 static void *
@@ -2448,7 +2449,7 @@ static void *g_start(struct seq_file *m, loff_t *pos)
 	mutex_lock(&graph_lock);
 
 	/* Nothing, tell g_show to print all functions are enabled */
-	if (!ftrace_graph_count && !*pos)
+	if (!ftrace_graph_filter_enabled && !*pos)
 		return (void *)1;
 
 	return __g_next(m, pos);
@@ -2494,6 +2495,7 @@ ftrace_graph_open(struct inode *inode, struct file *file)
 	mutex_lock(&graph_lock);
 	if ((file->f_mode & FMODE_WRITE) &&
 	    (file->f_flags & O_TRUNC)) {
+		ftrace_graph_filter_enabled = 0;
 		ftrace_graph_count = 0;
 		memset(ftrace_graph_funcs, 0, sizeof(ftrace_graph_funcs));
 	}
@@ -2519,7 +2521,7 @@ ftrace_set_func(unsigned long *array, int *idx, char *buffer)
 	struct dyn_ftrace *rec;
 	struct ftrace_page *pg;
 	int search_len;
-	int found = 0;
+	int fail = 1;
 	int type, not;
 	char *search;
 	bool exists;
@@ -2530,37 +2532,51 @@ ftrace_set_func(unsigned long *array, int *idx, char *buffer)
 
 	/* decode regex */
 	type = filter_parse_regex(buffer, strlen(buffer), &search, &not);
-	if (not)
-		return -EINVAL;
+	if (!not && *idx >= FTRACE_GRAPH_MAX_FUNCS)
+		return -EBUSY;
 
 	search_len = strlen(search);
 
 	mutex_lock(&ftrace_lock);
 	do_for_each_ftrace_rec(pg, rec) {
 
-		if (*idx >= FTRACE_GRAPH_MAX_FUNCS)
-			break;
-
 		if (rec->flags & (FTRACE_FL_FAILED | FTRACE_FL_FREE))
 			continue;
 
 		if (ftrace_match_record(rec, search, search_len, type)) {
-			/* ensure it is not already in the array */
+			/* if it is in the array */
 			exists = false;
-			for (i = 0; i < *idx; i++)
+			for (i = 0; i < *idx; i++) {
 				if (array[i] == rec->ip) {
 					exists = true;
 					break;
 				}
-			if (!exists)
-				array[(*idx)++] = rec->ip;
-			found = 1;
+			}
+
+			if (!not) {
+				fail = 0;
+				if (!exists) {
+					array[(*idx)++] = rec->ip;
+					if (*idx >= FTRACE_GRAPH_MAX_FUNCS)
+						goto out;
+				}
+			} else {
+				if (exists) {
+					array[i] = array[--(*idx)];
+					array[*idx] = 0;
+					fail = 0;
+				}
+			}
 		}
 	} while_for_each_ftrace_rec();
-
+out:
 	mutex_unlock(&ftrace_lock);
 
-	return found ? 0 : -EINVAL;
+	if (fail)
+		return -EINVAL;
+
+	ftrace_graph_filter_enabled = 1;
+	return 0;
 }
 
 static ssize_t
@@ -2570,16 +2586,11 @@ ftrace_graph_write(struct file *file, const char __user *ubuf,
 	struct trace_parser parser;
 	ssize_t read, ret;
 
-	if (!cnt || cnt < 0)
+	if (!cnt)
 		return 0;
 
 	mutex_lock(&graph_lock);
 
-	if (ftrace_graph_count >= FTRACE_GRAPH_MAX_FUNCS) {
-		ret = -EBUSY;
-		goto out_unlock;
-	}
-
 	if (trace_parser_get_init(&parser, FTRACE_BUFF_MAX)) {
 		ret = -ENOMEM;
 		goto out_unlock;
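
With the negated-regex case now accepted, writing "!func" to set_graph_function removes a previously added entry. Because the order of ftrace_graph_funcs carries no meaning, a match can be deleted in O(1) by overwriting it with the last element, which is exactly the `array[i] = array[--(*idx)]` line above. The same idiom standalone (the names here are invented for the sketch):

    #include <stdio.h>

    static int remove_entry(unsigned long *array, int *count, unsigned long val)
    {
        int i;

        for (i = 0; i < *count; i++) {
            if (array[i] == val) {
                /* order is irrelevant: move the tail entry into the hole */
                array[i] = array[--(*count)];
                array[*count] = 0;
                return 0;
            }
        }
        return -1; /* not found */
    }

    int main(void)
    {
        unsigned long funcs[4] = { 0x100, 0x200, 0x300 };
        int count = 3;

        remove_entry(funcs, &count, 0x200);
        for (int i = 0; i < count; i++)
            printf("0x%lx\n", funcs[i]); /* 0x100 then 0x300 */
        return 0;
    }
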
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index eac6875cb990..032c57ca6502 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -32,6 +32,7 @@
 #include <linux/splice.h>
 #include <linux/kdebug.h>
 #include <linux/string.h>
+#include <linux/rwsem.h>
 #include <linux/ctype.h>
 #include <linux/init.h>
 #include <linux/poll.h>
@@ -102,9 +103,6 @@ static inline void ftrace_enable_cpu(void)
 
 static cpumask_var_t __read_mostly tracing_buffer_mask;
 
-/* Define which cpu buffers are currently read in trace_pipe */
-static cpumask_var_t tracing_reader_cpumask;
-
 #define for_each_tracing_cpu(cpu)	\
 	for_each_cpu(cpu, tracing_buffer_mask)
 
@@ -243,12 +241,91 @@ static struct tracer *current_trace __read_mostly;
 
 /*
  * trace_types_lock is used to protect the trace_types list.
- * This lock is also used to keep user access serialized.
- * Accesses from userspace will grab this lock while userspace
- * activities happen inside the kernel.
  */
 static DEFINE_MUTEX(trace_types_lock);
 
+/*
+ * Serialize access to the ring buffer.
+ *
+ * The ring buffer serializes readers, but that is only low-level
+ * protection.  The validity of the events (returned by ring_buffer_peek()
+ * etc.) is not protected by the ring buffer.
+ *
+ * The content of events may become garbage if we allow another process to
+ * consume these events concurrently:
+ *   A) the page of the consumed events may become a normal page
+ *      (not a reader page) in the ring buffer, and this page will be
+ *      rewritten by the events producer.
+ *   B) the page of the consumed events may become a page for splice_read,
+ *      and this page will be returned to the system.
+ *
+ * These primitives allow multiple processes to access different cpu ring
+ * buffers concurrently.
+ *
+ * These primitives don't distinguish read-only from read-consume access.
+ * Multiple read-only accesses are also serialized.
+ */
+
+#ifdef CONFIG_SMP
+static DECLARE_RWSEM(all_cpu_access_lock);
+static DEFINE_PER_CPU(struct mutex, cpu_access_lock);
+
+static inline void trace_access_lock(int cpu)
+{
+	if (cpu == TRACE_PIPE_ALL_CPU) {
+		/* gain it for accessing the whole ring buffer. */
+		down_write(&all_cpu_access_lock);
+	} else {
+		/* gain it for accessing a cpu ring buffer. */
+
+		/* Firstly block other trace_access_lock(TRACE_PIPE_ALL_CPU). */
+		down_read(&all_cpu_access_lock);
+
+		/* Secondly block other access to this @cpu ring buffer. */
+		mutex_lock(&per_cpu(cpu_access_lock, cpu));
+	}
+}
+
+static inline void trace_access_unlock(int cpu)
+{
+	if (cpu == TRACE_PIPE_ALL_CPU) {
+		up_write(&all_cpu_access_lock);
+	} else {
+		mutex_unlock(&per_cpu(cpu_access_lock, cpu));
+		up_read(&all_cpu_access_lock);
+	}
+}
+
+static inline void trace_access_lock_init(void)
+{
+	int cpu;
+
+	for_each_possible_cpu(cpu)
+		mutex_init(&per_cpu(cpu_access_lock, cpu));
+}
+
+#else
+
+static DEFINE_MUTEX(access_lock);
+
+static inline void trace_access_lock(int cpu)
+{
+	(void)cpu;
+	mutex_lock(&access_lock);
+}
+
+static inline void trace_access_unlock(int cpu)
+{
+	(void)cpu;
+	mutex_unlock(&access_lock);
+}
+
+static inline void trace_access_lock_init(void)
+{
+}
+
+#endif
+
 /* trace_wait is a waitqueue for tasks blocked on trace_poll */
 static DECLARE_WAIT_QUEUE_HEAD(trace_wait);
 
@@ -1320,8 +1397,10 @@ int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
 	entry->fmt = fmt;
 
 	memcpy(entry->buf, trace_buf, sizeof(u32) * len);
-	if (!filter_check_discard(call, entry, buffer, event))
+	if (!filter_check_discard(call, entry, buffer, event)) {
 		ring_buffer_unlock_commit(buffer, event);
+		ftrace_trace_stack(buffer, flags, 6, pc);
+	}
 
 out_unlock:
 	arch_spin_unlock(&trace_buf_lock);
@@ -1394,8 +1473,10 @@ int trace_array_vprintk(struct trace_array *tr,
 
 	memcpy(&entry->buf, trace_buf, len);
 	entry->buf[len] = '\0';
-	if (!filter_check_discard(call, entry, buffer, event))
+	if (!filter_check_discard(call, entry, buffer, event)) {
 		ring_buffer_unlock_commit(buffer, event);
+		ftrace_trace_stack(buffer, irq_flags, 6, pc);
+	}
 
  out_unlock:
 	arch_spin_unlock(&trace_buf_lock);
@@ -1585,12 +1666,6 @@ static void tracing_iter_reset(struct trace_iterator *iter, int cpu)
 }
 
 /*
- * No necessary locking here. The worst thing which can
- * happen is loosing events consumed at the same time
- * by a trace_pipe reader.
- * Other than that, we don't risk to crash the ring buffer
- * because it serializes the readers.
- *
  * The current tracer is copied to avoid a global locking
  * all around.
  */
@@ -1645,12 +1720,16 @@ static void *s_start(struct seq_file *m, loff_t *pos)
 	}
 
 	trace_event_read_lock();
+	trace_access_lock(cpu_file);
 	return p;
 }
 
 static void s_stop(struct seq_file *m, void *p)
 {
+	struct trace_iterator *iter = m->private;
+
 	atomic_dec(&trace_record_cmdline_disabled);
+	trace_access_unlock(iter->cpu_file);
 	trace_event_read_unlock();
 }
 
@@ -2841,22 +2920,6 @@ static int tracing_open_pipe(struct inode *inode, struct file *filp)
 
 	mutex_lock(&trace_types_lock);
 
-	/* We only allow one reader per cpu */
-	if (cpu_file == TRACE_PIPE_ALL_CPU) {
-		if (!cpumask_empty(tracing_reader_cpumask)) {
-			ret = -EBUSY;
-			goto out;
-		}
-		cpumask_setall(tracing_reader_cpumask);
-	} else {
-		if (!cpumask_test_cpu(cpu_file, tracing_reader_cpumask))
-			cpumask_set_cpu(cpu_file, tracing_reader_cpumask);
-		else {
-			ret = -EBUSY;
-			goto out;
-		}
-	}
-
 	/* create a buffer to store the information to pass to userspace */
 	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
 	if (!iter) {
@@ -2912,12 +2975,6 @@ static int tracing_release_pipe(struct inode *inode, struct file *file)
 
 	mutex_lock(&trace_types_lock);
 
-	if (iter->cpu_file == TRACE_PIPE_ALL_CPU)
-		cpumask_clear(tracing_reader_cpumask);
-	else
-		cpumask_clear_cpu(iter->cpu_file, tracing_reader_cpumask);
-
-
 	if (iter->trace->pipe_close)
 		iter->trace->pipe_close(iter);
 
@@ -3079,6 +3136,7 @@ waitagain:
 	iter->pos = -1;
 
 	trace_event_read_lock();
+	trace_access_lock(iter->cpu_file);
 	while (find_next_entry_inc(iter) != NULL) {
 		enum print_line_t ret;
 		int len = iter->seq.len;
@@ -3095,6 +3153,7 @@ waitagain:
 		if (iter->seq.len >= cnt)
 			break;
 	}
+	trace_access_unlock(iter->cpu_file);
 	trace_event_read_unlock();
 
 	/* Now copy what we have to the user */
@@ -3220,6 +3279,7 @@ static ssize_t tracing_splice_read_pipe(struct file *filp,
 	}
 
 	trace_event_read_lock();
+	trace_access_lock(iter->cpu_file);
 
 	/* Fill as many pages as possible. */
 	for (i = 0, rem = len; i < PIPE_BUFFERS && rem; i++) {
@@ -3243,6 +3303,7 @@ static ssize_t tracing_splice_read_pipe(struct file *filp,
 		trace_seq_init(&iter->seq);
 	}
 
+	trace_access_unlock(iter->cpu_file);
 	trace_event_read_unlock();
 	mutex_unlock(&iter->mutex);
 
@@ -3544,10 +3605,12 @@ tracing_buffers_read(struct file *filp, char __user *ubuf,
 
 	info->read = 0;
 
+	trace_access_lock(info->cpu);
 	ret = ring_buffer_read_page(info->tr->buffer,
 				    &info->spare,
 				    count,
 				    info->cpu, 0);
+	trace_access_unlock(info->cpu);
 	if (ret < 0)
 		return 0;
 
@@ -3675,6 +3738,7 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos,
 		len &= PAGE_MASK;
 	}
 
+	trace_access_lock(info->cpu);
 	entries = ring_buffer_entries_cpu(info->tr->buffer, info->cpu);
 
 	for (i = 0; i < PIPE_BUFFERS && len && entries; i++, len -= PAGE_SIZE) {
@@ -3722,6 +3786,7 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos,
 		entries = ring_buffer_entries_cpu(info->tr->buffer, info->cpu);
 	}
 
+	trace_access_unlock(info->cpu);
 	spd.nr_pages = i;
 
 	/* did we read anything? */
@@ -4158,6 +4223,8 @@ static __init int tracer_init_debugfs(void)
 	struct dentry *d_tracer;
 	int cpu;
 
+	trace_access_lock_init();
+
 	d_tracer = tracing_init_dentry();
 
 	trace_create_file("tracing_enabled", 0644, d_tracer,
@@ -4392,9 +4459,6 @@ __init static int tracer_alloc_buffers(void)
 	if (!alloc_cpumask_var(&tracing_cpumask, GFP_KERNEL))
 		goto out_free_buffer_mask;
 
-	if (!zalloc_cpumask_var(&tracing_reader_cpumask, GFP_KERNEL))
-		goto out_free_tracing_cpumask;
-
 	/* To save memory, keep the ring buffer size to its minimum */
 	if (ring_buffer_expanded)
 		ring_buf_size = trace_buf_size;
@@ -4452,8 +4516,6 @@ __init static int tracer_alloc_buffers(void)
 	return 0;
 
 out_free_cpumask:
-	free_cpumask_var(tracing_reader_cpumask);
-out_free_tracing_cpumask:
 	free_cpumask_var(tracing_cpumask);
out_free_buffer_mask:
 	free_cpumask_var(tracing_buffer_mask);
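
The locking scheme introduced above pairs one global rwsem with a mutex per cpu: a per-cpu reader nests its own mutex inside a shared hold of the rwsem, while a TRACE_PIPE_ALL_CPU reader takes the rwsem exclusively and thereby excludes every per-cpu reader at once. A userspace approximation with pthreads (illustrative only; NR_CPUS and ALL_CPUS are stand-ins for the kernel's per-cpu machinery):

    #include <pthread.h>

    #define NR_CPUS  4
    #define ALL_CPUS (-1)

    static pthread_rwlock_t all_cpu_access_lock = PTHREAD_RWLOCK_INITIALIZER;
    static pthread_mutex_t cpu_access_lock[NR_CPUS] = {
        PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER,
        PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER,
    };

    static void trace_access_lock(int cpu)
    {
        if (cpu == ALL_CPUS) {
            /* exclusive: blocks every per-cpu reader */
            pthread_rwlock_wrlock(&all_cpu_access_lock);
        } else {
            /* shared: cpus may read concurrently, one reader per cpu */
            pthread_rwlock_rdlock(&all_cpu_access_lock);
            pthread_mutex_lock(&cpu_access_lock[cpu]);
        }
    }

    static void trace_access_unlock(int cpu)
    {
        if (cpu == ALL_CPUS) {
            pthread_rwlock_unlock(&all_cpu_access_lock);
        } else {
            pthread_mutex_unlock(&cpu_access_lock[cpu]);
            pthread_rwlock_unlock(&all_cpu_access_lock);
        }
    }

    int main(void)
    {
        trace_access_lock(2);        /* reader on cpu 2 */
        trace_access_unlock(2);
        trace_access_lock(ALL_CPUS); /* whole-buffer reader */
        trace_access_unlock(ALL_CPUS);
        return 0;
    }
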
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index 4df6a77eb196..b477fce41edf 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -497,6 +497,7 @@ trace_print_graph_duration(unsigned long long duration, struct trace_seq *s);
 #ifdef CONFIG_DYNAMIC_FTRACE
 /* TODO: make this variable */
 #define FTRACE_GRAPH_MAX_FUNCS		32
+extern int ftrace_graph_filter_enabled;
 extern int ftrace_graph_count;
 extern unsigned long ftrace_graph_funcs[FTRACE_GRAPH_MAX_FUNCS];
 
@@ -504,7 +505,7 @@ static inline int ftrace_graph_addr(unsigned long addr)
 {
 	int i;
 
-	if (!ftrace_graph_count || test_tsk_trace_graph(current))
+	if (!ftrace_graph_filter_enabled)
 		return 1;
 
 	for (i = 0; i < ftrace_graph_count; i++) {
diff --git a/kernel/trace/trace_branch.c b/kernel/trace/trace_branch.c
index 4a194f08f88c..b9bc4d470177 100644
--- a/kernel/trace/trace_branch.c
+++ b/kernel/trace/trace_branch.c
@@ -307,8 +307,23 @@ static int annotated_branch_stat_cmp(void *p1, void *p2)
 		return -1;
 	if (percent_a > percent_b)
 		return 1;
-	else
-		return 0;
+
+	if (a->incorrect < b->incorrect)
+		return -1;
+	if (a->incorrect > b->incorrect)
+		return 1;
+
+	/*
+	 * Since the above shows worse (incorrect) cases
+	 * first, we continue that by showing best (correct)
+	 * cases last.
+	 */
+	if (a->correct > b->correct)
+		return -1;
+	if (a->correct < b->correct)
+		return 1;
+
+	return 0;
 }
 
 static struct tracer_stat annotated_branch_stats = {
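
The comparator now walks a chain of keys and only consults the next key on a tie, so the worst (most incorrect) predictions sort first and the best (most correct) sort last. The same shape as a standalone qsort() comparator (the sample data is invented):

    #include <stdio.h>
    #include <stdlib.h>

    struct stat_node { long incorrect, correct; };

    static int stat_cmp(const void *p1, const void *p2)
    {
        const struct stat_node *a = p1, *b = p2;

        if (a->incorrect < b->incorrect)
            return -1;
        if (a->incorrect > b->incorrect)
            return 1;
        /* tie on the first key: best (correct) cases go last */
        if (a->correct > b->correct)
            return -1;
        if (a->correct < b->correct)
            return 1;
        return 0;
    }

    int main(void)
    {
        struct stat_node v[] = { {5, 1}, {2, 9}, {2, 3} };

        qsort(v, 3, sizeof(v[0]), stat_cmp);
        for (int i = 0; i < 3; i++)
            printf("%ld/%ld\n", v[i].incorrect, v[i].correct);
        return 0; /* order: 2/9, 2/3, 5/1 */
    }
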
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
index 189b09baf4fb..c2a3077b7353 100644
--- a/kernel/trace/trace_events.c
+++ b/kernel/trace/trace_events.c
@@ -520,41 +520,16 @@ out:
 	return ret;
 }
 
-extern char *__bad_type_size(void);
-
-#undef FIELD
-#define FIELD(type, name)						\
-	sizeof(type) != sizeof(field.name) ? __bad_type_size() :	\
-	#type, "common_" #name, offsetof(typeof(field), name),		\
-		sizeof(field.name), is_signed_type(type)
-
-static int trace_write_header(struct trace_seq *s)
-{
-	struct trace_entry field;
-
-	/* struct trace_entry */
-	return trace_seq_printf(s,
-		"\tfield:%s %s;\toffset:%zu;\tsize:%zu;\tsigned:%u;\n"
-		"\tfield:%s %s;\toffset:%zu;\tsize:%zu;\tsigned:%u;\n"
-		"\tfield:%s %s;\toffset:%zu;\tsize:%zu;\tsigned:%u;\n"
-		"\tfield:%s %s;\toffset:%zu;\tsize:%zu;\tsigned:%u;\n"
-		"\tfield:%s %s;\toffset:%zu;\tsize:%zu;\tsigned:%u;\n"
-		"\n",
-		FIELD(unsigned short, type),
-		FIELD(unsigned char, flags),
-		FIELD(unsigned char, preempt_count),
-		FIELD(int, pid),
-		FIELD(int, lock_depth));
-}
-
 static ssize_t
 event_format_read(struct file *filp, char __user *ubuf, size_t cnt,
 		  loff_t *ppos)
 {
 	struct ftrace_event_call *call = filp->private_data;
+	struct ftrace_event_field *field;
 	struct trace_seq *s;
+	int common_field_count = 5;
 	char *buf;
-	int r;
+	int r = 0;
 
 	if (*ppos)
 		return 0;
@@ -565,14 +540,48 @@ event_format_read(struct file *filp, char __user *ubuf, size_t cnt,
 
 	trace_seq_init(s);
 
-	/* If any of the first writes fail, so will the show_format. */
-
 	trace_seq_printf(s, "name: %s\n", call->name);
 	trace_seq_printf(s, "ID: %d\n", call->id);
 	trace_seq_printf(s, "format:\n");
-	trace_write_header(s);
 
-	r = call->show_format(call, s);
+	list_for_each_entry_reverse(field, &call->fields, link) {
+		/*
+		 * Smartly shows the array type (except dynamic arrays).
+		 * Normal:
+		 *	field:TYPE VAR
+		 * If TYPE := TYPE[LEN], it is shown as:
+		 *	field:TYPE VAR[LEN]
+		 */
+		const char *array_descriptor = strchr(field->type, '[');
+
+		if (!strncmp(field->type, "__data_loc", 10))
+			array_descriptor = NULL;
+
+		if (!array_descriptor) {
+			r = trace_seq_printf(s, "\tfield:%s %s;\toffset:%u;"
+					"\tsize:%u;\tsigned:%d;\n",
+					field->type, field->name, field->offset,
+					field->size, !!field->is_signed);
+		} else {
+			r = trace_seq_printf(s, "\tfield:%.*s %s%s;\toffset:%u;"
+					"\tsize:%u;\tsigned:%d;\n",
+					(int)(array_descriptor - field->type),
+					field->type, field->name,
+					array_descriptor, field->offset,
+					field->size, !!field->is_signed);
+		}
+
+		if (--common_field_count == 0)
+			r = trace_seq_printf(s, "\n");
+
+		if (!r)
+			break;
+	}
+
+	if (r)
+		r = trace_seq_printf(s, "\nprint fmt: %s\n",
+				call->print_fmt);
+
 	if (!r) {
 		/*
 		 * ug! The format output is bigger than a PAGE!!
@@ -948,10 +957,6 @@ event_create_dir(struct ftrace_event_call *call, struct dentry *d_events,
 			   filter);
 	}
 
-	/* A trace may not want to export its format */
-	if (!call->show_format)
-		return 0;
-
 	trace_create_file("format", 0444, call->dir, call,
 			  format);
 
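
With show_format gone, event_format_read() walks call->fields in reverse (the common fields were registered first) and appends print_fmt at the end. For a hypothetical event with a single int field, the resulting debugfs format file would look roughly like this; the common-field types and offsets follow the FIELD() lines deleted above:

    name: sample
    ID: 42
    format:
        field:unsigned short common_type;  offset:0;  size:2;  signed:0;
        field:unsigned char common_flags;  offset:2;  size:1;  signed:0;
        field:unsigned char common_preempt_count;  offset:3;  size:1;  signed:0;
        field:int common_pid;  offset:4;  size:4;  signed:1;
        field:int common_lock_depth;  offset:8;  size:4;  signed:1;

        field:int count;  offset:12;  size:4;  signed:1;

    print fmt: "count=%d", REC->count
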
diff --git a/kernel/trace/trace_export.c b/kernel/trace/trace_export.c
index d4fa5dc1ee4e..e091f64ba6ce 100644
--- a/kernel/trace/trace_export.c
+++ b/kernel/trace/trace_export.c
@@ -62,78 +62,6 @@ static void __always_unused ____ftrace_check_##name(void) \
 
 #include "trace_entries.h"
 
-
-#undef __field
-#define __field(type, item)						\
-	ret = trace_seq_printf(s, "\tfield:" #type " " #item ";\t"	\
-			       "offset:%zu;\tsize:%zu;\tsigned:%u;\n",	\
-			       offsetof(typeof(field), item),		\
-			       sizeof(field.item), is_signed_type(type)); \
-	if (!ret)							\
-		return 0;
-
-#undef __field_desc
-#define __field_desc(type, container, item)				\
-	ret = trace_seq_printf(s, "\tfield:" #type " " #item ";\t"	\
-			       "offset:%zu;\tsize:%zu;\tsigned:%u;\n",	\
-			       offsetof(typeof(field), container.item),	\
-			       sizeof(field.container.item),		\
-			       is_signed_type(type));			\
-	if (!ret)							\
-		return 0;
-
-#undef __array
-#define __array(type, item, len)					\
-	ret = trace_seq_printf(s, "\tfield:" #type " " #item "[" #len "];\t" \
-			       "offset:%zu;\tsize:%zu;\tsigned:%u;\n",	\
-			       offsetof(typeof(field), item),		\
-			       sizeof(field.item), is_signed_type(type)); \
-	if (!ret)							\
-		return 0;
-
-#undef __array_desc
-#define __array_desc(type, container, item, len)			\
-	ret = trace_seq_printf(s, "\tfield:" #type " " #item "[" #len "];\t" \
-			       "offset:%zu;\tsize:%zu;\tsigned:%u;\n",	\
-			       offsetof(typeof(field), container.item),	\
-			       sizeof(field.container.item),		\
-			       is_signed_type(type));			\
-	if (!ret)							\
-		return 0;
-
-#undef __dynamic_array
-#define __dynamic_array(type, item)					\
-	ret = trace_seq_printf(s, "\tfield:" #type " " #item ";\t"	\
-			       "offset:%zu;\tsize:0;\tsigned:%u;\n",	\
-			       offsetof(typeof(field), item),		\
-			       is_signed_type(type));			\
-	if (!ret)							\
-		return 0;
-
-#undef F_printk
-#define F_printk(fmt, args...) "%s, %s\n", #fmt, __stringify(args)
-
-#undef __entry
-#define __entry REC
-
-#undef FTRACE_ENTRY
-#define FTRACE_ENTRY(name, struct_name, id, tstruct, print)		\
-static int								\
-ftrace_format_##name(struct ftrace_event_call *unused,			\
-		     struct trace_seq *s)				\
-{									\
-	struct struct_name field __attribute__((unused));		\
-	int ret = 0;							\
-									\
-	tstruct;							\
-									\
-	trace_seq_printf(s, "\nprint fmt: " print);			\
-									\
-	return ret;							\
-}
-
-#include "trace_entries.h"
-
 #undef __field
 #define __field(type, item)						\
 	ret = trace_define_field(event_call, #type, #item,		\
@@ -175,7 +103,12 @@ ftrace_format_##name(struct ftrace_event_call *unused, \
 		return ret;
 
 #undef __dynamic_array
-#define __dynamic_array(type, item)
+#define __dynamic_array(type, item)					\
+	ret = trace_define_field(event_call, #type, #item,		\
+				 offsetof(typeof(field), item),		\
+				 0, is_signed_type(type), FILTER_OTHER);\
+	if (ret)							\
+		return ret;
 
 #undef FTRACE_ENTRY
 #define FTRACE_ENTRY(name, struct_name, id, tstruct, print)		\
@@ -198,6 +131,9 @@ static int ftrace_raw_init_event(struct ftrace_event_call *call)
 	return 0;
 }
 
+#undef __entry
+#define __entry REC
+
 #undef __field
 #define __field(type, item)
 
@@ -213,6 +149,9 @@ static int ftrace_raw_init_event(struct ftrace_event_call *call)
 #undef __dynamic_array
 #define __dynamic_array(type, item)
 
+#undef F_printk
+#define F_printk(fmt, args...) #fmt ", " __stringify(args)
+
 #undef FTRACE_ENTRY
 #define FTRACE_ENTRY(call, struct_name, type, tstruct, print)		\
 									\
@@ -223,7 +162,7 @@ __attribute__((section("_ftrace_events"))) event_##call = { \
 	.id = type,							\
 	.system = __stringify(TRACE_SYSTEM),				\
 	.raw_init = ftrace_raw_init_event,				\
-	.show_format = ftrace_format_##call,				\
+	.print_fmt = print,						\
 	.define_fields = ftrace_define_fields_##call,			\
 };									\
 
diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c
index b1342c5d37cf..616b135c9eb9 100644
--- a/kernel/trace/trace_functions_graph.c
+++ b/kernel/trace/trace_functions_graph.c
@@ -212,13 +212,11 @@ int trace_graph_entry(struct ftrace_graph_ent *trace)
 	int cpu;
 	int pc;
 
-	if (unlikely(!tr))
-		return 0;
-
 	if (!ftrace_trace_task(current))
 		return 0;
 
-	if (!ftrace_graph_addr(trace->func))
+	/* trace it when it is nested in an enabled function, or is itself enabled */
+	if (!(trace->depth || ftrace_graph_addr(trace->func)))
 		return 0;
 
 	local_irq_save(flags);
@@ -231,9 +229,6 @@ int trace_graph_entry(struct ftrace_graph_ent *trace)
 	} else {
 		ret = 0;
 	}
-	/* Only do the atomic if it is not already set */
-	if (!test_tsk_trace_graph(current))
-		set_tsk_trace_graph(current);
 
 	atomic_dec(&data->disabled);
 	local_irq_restore(flags);
@@ -281,17 +276,24 @@ void trace_graph_return(struct ftrace_graph_ret *trace)
 		pc = preempt_count();
 		__trace_graph_return(tr, trace, flags, pc);
 	}
-	if (!trace->depth)
-		clear_tsk_trace_graph(current);
 	atomic_dec(&data->disabled);
 	local_irq_restore(flags);
 }
 
+void set_graph_array(struct trace_array *tr)
+{
+	graph_array = tr;
+
+	/* Make graph_array visible before we start tracing */
+
+	smp_mb();
+}
+
 static int graph_trace_init(struct trace_array *tr)
 {
 	int ret;
 
-	graph_array = tr;
+	set_graph_array(tr);
 	ret = register_ftrace_graph(&trace_graph_return,
 				    &trace_graph_entry);
 	if (ret)
@@ -301,11 +303,6 @@ static int graph_trace_init(struct trace_array *tr)
 	return 0;
 }
 
-void set_graph_array(struct trace_array *tr)
-{
-	graph_array = tr;
-}
-
 static void graph_trace_reset(struct trace_array *tr)
 {
 	tracing_stop_cmdline_record();
diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c
index 50b1b8239806..53f748b64ef3 100644
--- a/kernel/trace/trace_kprobe.c
+++ b/kernel/trace/trace_kprobe.c
@@ -1174,80 +1174,60 @@ static int kretprobe_event_define_fields(struct ftrace_event_call *event_call)
 	return 0;
 }
 
-static int __probe_event_show_format(struct trace_seq *s,
-				     struct trace_probe *tp, const char *fmt,
-				     const char *arg)
+static int __set_print_fmt(struct trace_probe *tp, char *buf, int len)
 {
 	int i;
+	int pos = 0;
 
-	/* Show format */
-	if (!trace_seq_printf(s, "\nprint fmt: \"%s", fmt))
-		return 0;
+	const char *fmt, *arg;
 
-	for (i = 0; i < tp->nr_args; i++)
-		if (!trace_seq_printf(s, " %s=%%lx", tp->args[i].name))
-			return 0;
+	if (!probe_is_return(tp)) {
+		fmt = "(%lx)";
+		arg = "REC->" FIELD_STRING_IP;
+	} else {
+		fmt = "(%lx <- %lx)";
+		arg = "REC->" FIELD_STRING_FUNC ", REC->" FIELD_STRING_RETIP;
+	}
 
-	if (!trace_seq_printf(s, "\", %s", arg))
-		return 0;
+	/* When len=0, we just calculate the needed length */
+#define LEN_OR_ZERO (len ? len - pos : 0)
 
-	for (i = 0; i < tp->nr_args; i++)
-		if (!trace_seq_printf(s, ", REC->%s", tp->args[i].name))
-			return 0;
+	pos += snprintf(buf + pos, LEN_OR_ZERO, "\"%s", fmt);
 
-	return trace_seq_puts(s, "\n");
-}
+	for (i = 0; i < tp->nr_args; i++) {
+		pos += snprintf(buf + pos, LEN_OR_ZERO, " %s=%%lx",
+				tp->args[i].name);
+	}
 
-#undef SHOW_FIELD
-#define SHOW_FIELD(type, item, name)				\
-	do {							\
-		ret = trace_seq_printf(s, "\tfield:" #type " %s;\t"	\
-				"offset:%u;\tsize:%u;\tsigned:%d;\n", name,\
-				(unsigned int)offsetof(typeof(field), item),\
-				(unsigned int)sizeof(type),	\
-				is_signed_type(type));		\
-		if (!ret)					\
-			return 0;				\
-	} while (0)
+	pos += snprintf(buf + pos, LEN_OR_ZERO, "\", %s", arg);
 
-static int kprobe_event_show_format(struct ftrace_event_call *call,
-				    struct trace_seq *s)
-{
-	struct kprobe_trace_entry field __attribute__((unused));
-	int ret, i;
-	struct trace_probe *tp = (struct trace_probe *)call->data;
-
-	SHOW_FIELD(unsigned long, ip, FIELD_STRING_IP);
-	SHOW_FIELD(int, nargs, FIELD_STRING_NARGS);
+	for (i = 0; i < tp->nr_args; i++) {
+		pos += snprintf(buf + pos, LEN_OR_ZERO, ", REC->%s",
+				tp->args[i].name);
+	}
 
-	/* Show fields */
-	for (i = 0; i < tp->nr_args; i++)
-		SHOW_FIELD(unsigned long, args[i], tp->args[i].name);
-	trace_seq_puts(s, "\n");
+#undef LEN_OR_ZERO
 
-	return __probe_event_show_format(s, tp, "(%lx)",
-					 "REC->" FIELD_STRING_IP);
+	/* return the length of print_fmt */
+	return pos;
 }
 
-static int kretprobe_event_show_format(struct ftrace_event_call *call,
-				       struct trace_seq *s)
+static int set_print_fmt(struct trace_probe *tp)
 {
-	struct kretprobe_trace_entry field __attribute__((unused));
-	int ret, i;
-	struct trace_probe *tp = (struct trace_probe *)call->data;
+	int len;
+	char *print_fmt;
 
-	SHOW_FIELD(unsigned long, func, FIELD_STRING_FUNC);
-	SHOW_FIELD(unsigned long, ret_ip, FIELD_STRING_RETIP);
-	SHOW_FIELD(int, nargs, FIELD_STRING_NARGS);
+	/* First: called with 0 length to calculate the needed length */
+	len = __set_print_fmt(tp, NULL, 0);
+	print_fmt = kmalloc(len + 1, GFP_KERNEL);
+	if (!print_fmt)
+		return -ENOMEM;
 
-	/* Show fields */
-	for (i = 0; i < tp->nr_args; i++)
-		SHOW_FIELD(unsigned long, args[i], tp->args[i].name);
-	trace_seq_puts(s, "\n");
+	/* Second: actually write the @print_fmt */
+	__set_print_fmt(tp, print_fmt, len + 1);
+	tp->call.print_fmt = print_fmt;
 
-	return __probe_event_show_format(s, tp, "(%lx <- %lx)",
-					 "REC->" FIELD_STRING_FUNC
-					 ", REC->" FIELD_STRING_RETIP);
+	return 0;
 }
 
 #ifdef CONFIG_EVENT_PROFILE
@@ -1448,18 +1428,20 @@ static int register_probe_event(struct trace_probe *tp)
 	if (probe_is_return(tp)) {
 		tp->event.trace = print_kretprobe_event;
 		call->raw_init = probe_event_raw_init;
-		call->show_format = kretprobe_event_show_format;
 		call->define_fields = kretprobe_event_define_fields;
 	} else {
 		tp->event.trace = print_kprobe_event;
 		call->raw_init = probe_event_raw_init;
-		call->show_format = kprobe_event_show_format;
 		call->define_fields = kprobe_event_define_fields;
 	}
+	if (set_print_fmt(tp) < 0)
+		return -ENOMEM;
 	call->event = &tp->event;
 	call->id = register_ftrace_event(&tp->event);
-	if (!call->id)
+	if (!call->id) {
+		kfree(call->print_fmt);
 		return -ENODEV;
+	}
 	call->enabled = 0;
 	call->regfunc = probe_event_enable;
 	call->unregfunc = probe_event_disable;
@@ -1472,6 +1454,7 @@ static int register_probe_event(struct trace_probe *tp)
 	ret = trace_add_event_call(call);
 	if (ret) {
 		pr_info("Failed to register kprobe event: %s\n", call->name);
+		kfree(call->print_fmt);
 		unregister_ftrace_event(&tp->event);
 	}
 	return ret;
@@ -1481,6 +1464,7 @@ static void unregister_probe_event(struct trace_probe *tp)
 {
 	/* tp->event is unregistered in trace_remove_event_call() */
 	trace_remove_event_call(&tp->call);
+	kfree(tp->call.print_fmt);
 }
 
 /* Make a debugfs interface for controling probe points */
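
set_print_fmt() uses the classic two-pass snprintf() sizing idiom seen above: the first call passes a zero length, so LEN_OR_ZERO collapses to 0 and only the would-be length accumulates in pos; the second call fills the freshly allocated buffer. The same pattern standalone (build_fmt() is a made-up stand-in for the kprobe logic, and like the kernel code it relies on snprintf() accepting a zero-length buffer in the sizing pass):

    #include <stdio.h>
    #include <stdlib.h>

    static int build_fmt(char *buf, int len, const char **args, int nr)
    {
        int pos = 0;

    #define LEN_OR_ZERO (len ? len - pos : 0)
        pos += snprintf(buf + pos, LEN_OR_ZERO, "\"(%%lx)");
        for (int i = 0; i < nr; i++)
            pos += snprintf(buf + pos, LEN_OR_ZERO, " %s=%%lx", args[i]);
        pos += snprintf(buf + pos, LEN_OR_ZERO, "\"");
    #undef LEN_OR_ZERO

        return pos; /* bytes a full write needs, excluding the NUL */
    }

    int main(void)
    {
        const char *args[] = { "arg1", "arg2" };
        int len = build_fmt(NULL, 0, args, 2);  /* pass 1: size only */
        char *fmt = malloc(len + 1);

        if (!fmt)
            return 1;
        build_fmt(fmt, len + 1, args, 2);       /* pass 2: fill */
        puts(fmt); /* prints: "(%lx) arg1=%lx arg2=%lx" (quotes included) */
        free(fmt);
        return 0;
    }
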
diff --git a/kernel/trace/trace_syscalls.c b/kernel/trace/trace_syscalls.c
index 75289f372dd2..49cea70fbf6d 100644
--- a/kernel/trace/trace_syscalls.c
+++ b/kernel/trace/trace_syscalls.c
@@ -143,70 +143,65 @@ extern char *__bad_type_size(void);
 		#type, #name, offsetof(typeof(trace), name),		\
 		sizeof(trace.name), is_signed_type(type)
 
-int syscall_enter_format(struct ftrace_event_call *call, struct trace_seq *s)
+static
+int __set_enter_print_fmt(struct syscall_metadata *entry, char *buf, int len)
 {
 	int i;
-	int ret;
-	struct syscall_metadata *entry = call->data;
-	struct syscall_trace_enter trace;
-	int offset = offsetof(struct syscall_trace_enter, args);
+	int pos = 0;
 
-	ret = trace_seq_printf(s, "\tfield:%s %s;\toffset:%zu;\tsize:%zu;"
-			       "\tsigned:%u;\n",
-			       SYSCALL_FIELD(int, nr));
-	if (!ret)
-		return 0;
+	/* When len=0, we just calculate the needed length */
+#define LEN_OR_ZERO (len ? len - pos : 0)
 
+	pos += snprintf(buf + pos, LEN_OR_ZERO, "\"");
 	for (i = 0; i < entry->nb_args; i++) {
-		ret = trace_seq_printf(s, "\tfield:%s %s;", entry->types[i],
-				       entry->args[i]);
-		if (!ret)
-			return 0;
-		ret = trace_seq_printf(s, "\toffset:%d;\tsize:%zu;"
-				       "\tsigned:%u;\n", offset,
-				       sizeof(unsigned long),
-				       is_signed_type(unsigned long));
-		if (!ret)
-			return 0;
-		offset += sizeof(unsigned long);
+		pos += snprintf(buf + pos, LEN_OR_ZERO, "%s: 0x%%0%zulx%s",
+				entry->args[i], sizeof(unsigned long),
+				i == entry->nb_args - 1 ? "" : ", ");
 	}
+	pos += snprintf(buf + pos, LEN_OR_ZERO, "\"");
 
-	trace_seq_puts(s, "\nprint fmt: \"");
 	for (i = 0; i < entry->nb_args; i++) {
-		ret = trace_seq_printf(s, "%s: 0x%%0%zulx%s", entry->args[i],
-				       sizeof(unsigned long),
-				       i == entry->nb_args - 1 ? "" : ", ");
-		if (!ret)
-			return 0;
+		pos += snprintf(buf + pos, LEN_OR_ZERO,
+				", ((unsigned long)(REC->%s))", entry->args[i]);
 	}
-	trace_seq_putc(s, '"');
 
-	for (i = 0; i < entry->nb_args; i++) {
-		ret = trace_seq_printf(s, ", ((unsigned long)(REC->%s))",
-				       entry->args[i]);
-		if (!ret)
-			return 0;
-	}
+#undef LEN_OR_ZERO
 
-	return trace_seq_putc(s, '\n');
+	/* return the length of print_fmt */
+	return pos;
 }
 
-int syscall_exit_format(struct ftrace_event_call *call, struct trace_seq *s)
+static int set_syscall_print_fmt(struct ftrace_event_call *call)
 {
-	int ret;
-	struct syscall_trace_exit trace;
+	char *print_fmt;
+	int len;
+	struct syscall_metadata *entry = call->data;
 
-	ret = trace_seq_printf(s,
-			       "\tfield:%s %s;\toffset:%zu;\tsize:%zu;"
-			       "\tsigned:%u;\n"
-			       "\tfield:%s %s;\toffset:%zu;\tsize:%zu;"
-			       "\tsigned:%u;\n",
-			       SYSCALL_FIELD(int, nr),
-			       SYSCALL_FIELD(long, ret));
-	if (!ret)
+	if (entry->enter_event != call) {
+		call->print_fmt = "\"0x%lx\", REC->ret";
 		return 0;
+	}
 
-	return trace_seq_printf(s, "\nprint fmt: \"0x%%lx\", REC->ret\n");
+	/* First: called with 0 length to calculate the needed length */
+	len = __set_enter_print_fmt(entry, NULL, 0);
+
+	print_fmt = kmalloc(len + 1, GFP_KERNEL);
+	if (!print_fmt)
+		return -ENOMEM;
+
+	/* Second: actually write the @print_fmt */
+	__set_enter_print_fmt(entry, print_fmt, len + 1);
+	call->print_fmt = print_fmt;
+
+	return 0;
+}
+
+static void free_syscall_print_fmt(struct ftrace_event_call *call)
+{
+	struct syscall_metadata *entry = call->data;
+
+	if (entry->enter_event == call)
+		kfree(call->print_fmt);
 }
 
 int syscall_enter_define_fields(struct ftrace_event_call *call)
@@ -386,12 +381,17 @@ int init_syscall_trace(struct ftrace_event_call *call)
 {
 	int id;
 
-	id = register_ftrace_event(call->event);
-	if (!id)
-		return -ENODEV;
-	call->id = id;
-	INIT_LIST_HEAD(&call->fields);
-	return 0;
+	if (set_syscall_print_fmt(call) < 0)
+		return -ENOMEM;
+
+	id = trace_event_raw_init(call);
+
+	if (id < 0) {
+		free_syscall_print_fmt(call);
+		return id;
+	}
+
+	return id;
 }
 
 int __init init_ftrace_syscalls(void)
diff --git a/scripts/recordmcount.pl b/scripts/recordmcount.pl
index ea6f6e3adaea..f3c9c0a90b98 100755
--- a/scripts/recordmcount.pl
+++ b/scripts/recordmcount.pl
@@ -136,13 +136,14 @@ my %text_sections = (
     ".text.unlikely" => 1,
 );
 
-$objdump = "objdump" if ((length $objdump) == 0);
-$objcopy = "objcopy" if ((length $objcopy) == 0);
-$cc = "gcc" if ((length $cc) == 0);
-$ld = "ld" if ((length $ld) == 0);
-$nm = "nm" if ((length $nm) == 0);
-$rm = "rm" if ((length $rm) == 0);
-$mv = "mv" if ((length $mv) == 0);
+# Note: we are nice to C-programmers here, thus we skip the '||='-idiom.
+$objdump = 'objdump' if (!$objdump);
+$objcopy = 'objcopy' if (!$objcopy);
+$cc = 'gcc' if (!$cc);
+$ld = 'ld' if (!$ld);
+$nm = 'nm' if (!$nm);
+$rm = 'rm' if (!$rm);
+$mv = 'mv' if (!$mv);
 
 #print STDERR "running: $P '$arch' '$objdump' '$objcopy' '$cc' '$ld' " .
 #	"'$nm' '$rm' '$mv' '$inputfile'\n";
@@ -432,14 +433,14 @@ sub update_funcs
 
 	# Loop through all the mcount caller offsets and print a reference
 	# to the caller based from the ref_func.
-	for (my $i=0; $i <= $#offsets; $i++) {
-		if (!$opened) {
-			open(FILE, ">$mcount_s") || die "can't create $mcount_s\n";
-			$opened = 1;
-			print FILE "\t.section $mcount_section,\"a\",$section_type\n";
-			print FILE "\t.align $alignment\n" if (defined($alignment));
-		}
-		printf FILE "\t%s %s + %d\n", $type, $ref_func, $offsets[$i] - $offset;
+	if (!$opened) {
+		open(FILE, ">$mcount_s") || die "can't create $mcount_s\n";
+		$opened = 1;
+		print FILE "\t.section $mcount_section,\"a\",$section_type\n";
+		print FILE "\t.align $alignment\n" if (defined($alignment));
+	}
+	foreach my $cur_offset (@offsets) {
+		printf FILE "\t%s %s + %d\n", $type, $ref_func, $cur_offset - $offset;
 	}
 }
 
@@ -476,11 +477,7 @@ while (<IN>) {
 	$read_headers = 0;
 
 	# Only record text sections that we know are safe
-	if (defined($text_sections{$1})) {
-		$read_function = 1;
-	} else {
-		$read_function = 0;
-	}
+	$read_function = defined($text_sections{$1});
 	# print out any recorded offsets
 	update_funcs();
 
@@ -514,7 +511,7 @@ while (<IN>) {
 	}
 	# is this a call site to mcount? If so, record it to print later
 	if ($text_found && /$mcount_regex/) {
-		$offsets[$#offsets + 1] = hex $1;
+		push(@offsets, hex $1);
 	}
 }
 