author     Ingo Molnar <mingo@elte.hu>    2009-06-04 07:59:26 -0400
committer  Ingo Molnar <mingo@elte.hu>    2009-06-04 07:59:40 -0400
commit     64edbc562034f2ec3fce382cb208fab40586d005
tree       7fbfaaea9467d14a1a6ac5667ce01be5ccb9b635
parent     43bd1236234cacbc18d1476a9b57e7a306efddf5
parent     0f6ce3de4ef6ff940308087c49760d068851c1a7
Merge branch 'tracing/ftrace' into tracing/core
Merge reason: this mini-topic had outstanding problems that delayed
its merge, so it does not fast-forward.
Signed-off-by: Ingo Molnar <mingo@elte.hu>
-rw-r--r--  Documentation/kernel-parameters.txt  |  17
-rw-r--r--  include/linux/ftrace_event.h         |   2
-rw-r--r--  include/trace/events/irq.h           |  24
-rw-r--r--  include/trace/ftrace.h               | 126
-rw-r--r--  kernel/trace/Kconfig                 |  60
-rw-r--r--  kernel/trace/ftrace.c                |  50
-rw-r--r--  kernel/trace/trace.c                 |   3
-rw-r--r--  kernel/trace/trace_events_filter.c   |   6
-rw-r--r--  kernel/trace/trace_output.c          |   3
9 files changed, 205 insertions, 86 deletions
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index 9243dd84f4d6..fcd3bfbe74e8 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -751,12 +751,25 @@ and is between 256 and 4096 characters. It is defined in the file | |||
751 | ia64_pal_cache_flush instead of SAL_CACHE_FLUSH. | 751 | ia64_pal_cache_flush instead of SAL_CACHE_FLUSH. |
752 | 752 | ||
753 | ftrace=[tracer] | 753 | ftrace=[tracer] |
754 | [ftrace] will set and start the specified tracer | 754 | [FTRACE] will set and start the specified tracer |
755 | as early as possible in order to facilitate early | 755 | as early as possible in order to facilitate early |
756 | boot debugging. | 756 | boot debugging. |
757 | 757 | ||
758 | ftrace_dump_on_oops | 758 | ftrace_dump_on_oops |
759 | [ftrace] will dump the trace buffers on oops. | 759 | [FTRACE] will dump the trace buffers on oops. |
760 | |||
761 | ftrace_filter=[function-list] | ||
762 | [FTRACE] Limit the functions traced by the function | ||
763 | tracer at boot up. function-list is a comma separated | ||
764 | list of functions. This list can be changed at run | ||
765 | time by the set_ftrace_filter file in the debugfs | ||
766 | tracing directory. | ||
767 | |||
768 | ftrace_notrace=[function-list] | ||
769 | [FTRACE] Do not trace the functions specified in | ||
770 | function-list. This list can be changed at run time | ||
771 | by the set_ftrace_notrace file in the debugfs | ||
772 | tracing directory. | ||
760 | 773 | ||
761 | gamecon.map[2|3]= | 774 | gamecon.map[2|3]= |
762 | [HW,JOY] Multisystem joystick and NES/SNES/PSX pad | 775 | [HW,JOY] Multisystem joystick and NES/SNES/PSX pad |
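
For context, the two new parameters take the same comma-separated function lists that the runtime debugfs files accept, so early filtering can be combined with the existing `ftrace=` option. A hypothetical boot line (the function names are purely illustrative, not taken from this patch):

```
ftrace=function ftrace_filter=schedule,vfs_read ftrace_notrace=preempt_schedule
```

After boot the same lists can still be adjusted through set_ftrace_filter and set_ftrace_notrace in the debugfs tracing directory, as the new documentation text notes.
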
diff --git a/include/linux/ftrace_event.h b/include/linux/ftrace_event.h
index bbf40f624fc8..5c093ffc655b 100644
--- a/include/linux/ftrace_event.h
+++ b/include/linux/ftrace_event.h
@@ -51,6 +51,7 @@ struct trace_iterator { | |||
51 | int cpu_file; | 51 | int cpu_file; |
52 | struct mutex mutex; | 52 | struct mutex mutex; |
53 | struct ring_buffer_iter *buffer_iter[NR_CPUS]; | 53 | struct ring_buffer_iter *buffer_iter[NR_CPUS]; |
54 | unsigned long iter_flags; | ||
54 | 55 | ||
55 | /* The below is zeroed out in pipe_read */ | 56 | /* The below is zeroed out in pipe_read */ |
56 | struct trace_seq seq; | 57 | struct trace_seq seq; |
@@ -58,7 +59,6 @@ struct trace_iterator { | |||
58 | int cpu; | 59 | int cpu; |
59 | u64 ts; | 60 | u64 ts; |
60 | 61 | ||
61 | unsigned long iter_flags; | ||
62 | loff_t pos; | 62 | loff_t pos; |
63 | long idx; | 63 | long idx; |
64 | 64 | ||
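
The motivation for moving `iter_flags` shows up later in this merge: tracing_open_pipe() now sets TRACE_FILE_LAT_FMT in it, and everything from `seq` to the end of struct trace_iterator is wiped between reads of trace_pipe, so the flag only survives if the field sits above that boundary. A minimal sketch of the reset assumed to happen in the pipe read path (not a quote of the kernel function):

```c
#include <linux/ftrace_event.h>
#include <linux/stddef.h>
#include <linux/string.h>

/* Hypothetical helper mirroring what the trace_pipe read path is assumed to
 * do: clear every member from ->seq onward, leaving earlier fields (which now
 * include iter_flags) untouched between reads. */
static void trace_iterator_reset_sketch(struct trace_iterator *iter)
{
	memset(&iter->seq, 0,
	       sizeof(struct trace_iterator) - offsetof(struct trace_iterator, seq));
}
```
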
diff --git a/include/trace/events/irq.h b/include/trace/events/irq.h
index 683fb36a9943..b0c7ede55eb1 100644
--- a/include/trace/events/irq.h
+++ b/include/trace/events/irq.h
@@ -7,18 +7,18 @@ | |||
7 | #undef TRACE_SYSTEM | 7 | #undef TRACE_SYSTEM |
8 | #define TRACE_SYSTEM irq | 8 | #define TRACE_SYSTEM irq |
9 | 9 | ||
10 | #define softirq_name(sirq) { sirq, #sirq } | 10 | #define softirq_name(sirq) { sirq##_SOFTIRQ, #sirq } |
11 | #define show_softirq_name(val) \ | 11 | #define show_softirq_name(val) \ |
12 | __print_symbolic(val, \ | 12 | __print_symbolic(val, \ |
13 | softirq_name(HI_SOFTIRQ), \ | 13 | softirq_name(HI), \ |
14 | softirq_name(TIMER_SOFTIRQ), \ | 14 | softirq_name(TIMER), \ |
15 | softirq_name(NET_TX_SOFTIRQ), \ | 15 | softirq_name(NET_TX), \ |
16 | softirq_name(NET_RX_SOFTIRQ), \ | 16 | softirq_name(NET_RX), \ |
17 | softirq_name(BLOCK_SOFTIRQ), \ | 17 | softirq_name(BLOCK), \ |
18 | softirq_name(TASKLET_SOFTIRQ), \ | 18 | softirq_name(TASKLET), \ |
19 | softirq_name(SCHED_SOFTIRQ), \ | 19 | softirq_name(SCHED), \ |
20 | softirq_name(HRTIMER_SOFTIRQ), \ | 20 | softirq_name(HRTIMER), \ |
21 | softirq_name(RCU_SOFTIRQ)) | 21 | softirq_name(RCU)) |
22 | 22 | ||
23 | /** | 23 | /** |
24 | * irq_handler_entry - called immediately before the irq action handler | 24 | * irq_handler_entry - called immediately before the irq action handler |
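
The rewritten macro moves the `_SOFTIRQ` suffix into the token-pasting side, so the symbolic table still maps the real enum values while the printed names drop the suffix: `softirq_name(TIMER)` now yields `{ TIMER_SOFTIRQ, "TIMER" }`. A self-contained user-space sketch of the same trick (the enum values are made up, not the kernel's softirq numbers):

```c
#include <stdio.h>

/* Stand-alone illustration of the sirq##_SOFTIRQ token-pasting trick. */
enum { HI_SOFTIRQ, TIMER_SOFTIRQ, NET_TX_SOFTIRQ };

#define softirq_name(sirq) { sirq##_SOFTIRQ, #sirq }

static const struct {
	int val;
	const char *name;
} softirqs[] = {
	softirq_name(HI),
	softirq_name(TIMER),
	softirq_name(NET_TX),
};

int main(void)
{
	unsigned int i;

	for (i = 0; i < sizeof(softirqs) / sizeof(softirqs[0]); i++)
		printf("%d -> %s\n", softirqs[i].val, softirqs[i].name);
	return 0;	/* prints 0 -> HI, 1 -> TIMER, 2 -> NET_TX */
}
```
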
diff --git a/include/trace/ftrace.h b/include/trace/ftrace.h
index b4ec83ae711f..b5478dab579b 100644
--- a/include/trace/ftrace.h
+++ b/include/trace/ftrace.h
@@ -18,14 +18,17 @@ | |||
18 | 18 | ||
19 | #include <linux/ftrace_event.h> | 19 | #include <linux/ftrace_event.h> |
20 | 20 | ||
21 | #undef __field | ||
22 | #define __field(type, item) type item; | ||
23 | |||
21 | #undef __array | 24 | #undef __array |
22 | #define __array(type, item, len) type item[len]; | 25 | #define __array(type, item, len) type item[len]; |
23 | 26 | ||
24 | #undef __field | 27 | #undef __dynamic_array |
25 | #define __field(type, item) type item; | 28 | #define __dynamic_array(type, item, len) unsigned short __data_loc_##item; |
26 | 29 | ||
27 | #undef __string | 30 | #undef __string |
28 | #define __string(item, src) unsigned short __str_loc_##item; | 31 | #define __string(item, src) __dynamic_array(char, item, -1) |
29 | 32 | ||
30 | #undef TP_STRUCT__entry | 33 | #undef TP_STRUCT__entry |
31 | #define TP_STRUCT__entry(args...) args | 34 | #define TP_STRUCT__entry(args...) args |
@@ -35,7 +38,7 @@ | |||
35 | struct ftrace_raw_##name { \ | 38 | struct ftrace_raw_##name { \ |
36 | struct trace_entry ent; \ | 39 | struct trace_entry ent; \ |
37 | tstruct \ | 40 | tstruct \ |
38 | char __str_data[0]; \ | 41 | char __data[0]; \ |
39 | }; \ | 42 | }; \ |
40 | static struct ftrace_event_call event_##name | 43 | static struct ftrace_event_call event_##name |
41 | 44 | ||
@@ -47,30 +50,31 @@ | |||
47 | * | 50 | * |
48 | * Include the following: | 51 | * Include the following: |
49 | * | 52 | * |
50 | * struct ftrace_str_offsets_<call> { | 53 | * struct ftrace_data_offsets_<call> { |
51 | * int <str1>; | 54 | * int <item1>; |
52 | * int <str2>; | 55 | * int <item2>; |
53 | * [...] | 56 | * [...] |
54 | * }; | 57 | * }; |
55 | * | 58 | * |
56 | * The __string() macro will create each int <str>, this is to | 59 | * The __dynamic_array() macro will create each int <item>, this is |
57 | * keep the offset of each string from the beggining of the event | 60 | * to keep the offset of each array from the beginning of the event. |
58 | * once we perform the strlen() of the src strings. | ||
59 | * | ||
60 | */ | 61 | */ |
61 | 62 | ||
63 | #undef __field | ||
64 | #define __field(type, item); | ||
65 | |||
62 | #undef __array | 66 | #undef __array |
63 | #define __array(type, item, len) | 67 | #define __array(type, item, len) |
64 | 68 | ||
65 | #undef __field | 69 | #undef __dynamic_array |
66 | #define __field(type, item); | 70 | #define __dynamic_array(type, item, len) int item; |
67 | 71 | ||
68 | #undef __string | 72 | #undef __string |
69 | #define __string(item, src) int item; | 73 | #define __string(item, src) __dynamic_array(char, item, -1) |
70 | 74 | ||
71 | #undef TRACE_EVENT | 75 | #undef TRACE_EVENT |
72 | #define TRACE_EVENT(call, proto, args, tstruct, assign, print) \ | 76 | #define TRACE_EVENT(call, proto, args, tstruct, assign, print) \ |
73 | struct ftrace_str_offsets_##call { \ | 77 | struct ftrace_data_offsets_##call { \ |
74 | tstruct; \ | 78 | tstruct; \ |
75 | }; | 79 | }; |
76 | 80 | ||
@@ -119,8 +123,12 @@ | |||
119 | #undef TP_printk | 123 | #undef TP_printk |
120 | #define TP_printk(fmt, args...) fmt "\n", args | 124 | #define TP_printk(fmt, args...) fmt "\n", args |
121 | 125 | ||
126 | #undef __get_dynamic_array | ||
127 | #define __get_dynamic_array(field) \ | ||
128 | ((void *)__entry + __entry->__data_loc_##field) | ||
129 | |||
122 | #undef __get_str | 130 | #undef __get_str |
123 | #define __get_str(field) ((char *)__entry + __entry->__str_loc_##field) | 131 | #define __get_str(field) (char *)__get_dynamic_array(field) |
124 | 132 | ||
125 | #undef __print_flags | 133 | #undef __print_flags |
126 | #define __print_flags(flag, delim, flag_array...) \ | 134 | #define __print_flags(flag, delim, flag_array...) \ |
@@ -207,16 +215,19 @@ ftrace_raw_output_##call(struct trace_iterator *iter, int flags) \ | |||
207 | if (!ret) \ | 215 | if (!ret) \ |
208 | return 0; | 216 | return 0; |
209 | 217 | ||
210 | #undef __string | 218 | #undef __dynamic_array |
211 | #define __string(item, src) \ | 219 | #define __dynamic_array(type, item, len) \ |
212 | ret = trace_seq_printf(s, "\tfield: __str_loc " #item ";\t" \ | 220 | ret = trace_seq_printf(s, "\tfield:__data_loc " #item ";\t" \ |
213 | "offset:%u;tsize:%u;\n", \ | 221 | "offset:%u;\tsize:%u;\n", \ |
214 | (unsigned int)offsetof(typeof(field), \ | 222 | (unsigned int)offsetof(typeof(field), \ |
215 | __str_loc_##item), \ | 223 | __data_loc_##item), \ |
216 | (unsigned int)sizeof(field.__str_loc_##item)); \ | 224 | (unsigned int)sizeof(field.__data_loc_##item)); \ |
217 | if (!ret) \ | 225 | if (!ret) \ |
218 | return 0; | 226 | return 0; |
219 | 227 | ||
228 | #undef __string | ||
229 | #define __string(item, src) __dynamic_array(char, item, -1) | ||
230 | |||
220 | #undef __entry | 231 | #undef __entry |
221 | #define __entry REC | 232 | #define __entry REC |
222 | 233 | ||
@@ -260,11 +271,14 @@ ftrace_format_##call(struct trace_seq *s) \ | |||
260 | if (ret) \ | 271 | if (ret) \ |
261 | return ret; | 272 | return ret; |
262 | 273 | ||
274 | #undef __dynamic_array | ||
275 | #define __dynamic_array(type, item, len) \ | ||
276 | ret = trace_define_field(event_call, "__data_loc" "[" #type "]", #item,\ | ||
277 | offsetof(typeof(field), __data_loc_##item), \ | ||
278 | sizeof(field.__data_loc_##item), 0); | ||
279 | |||
263 | #undef __string | 280 | #undef __string |
264 | #define __string(item, src) \ | 281 | #define __string(item, src) __dynamic_array(char, item, -1) |
265 | ret = trace_define_field(event_call, "__str_loc", #item, \ | ||
266 | offsetof(typeof(field), __str_loc_##item), \ | ||
267 | sizeof(field.__str_loc_##item), 0); | ||
268 | 282 | ||
269 | #undef TRACE_EVENT | 283 | #undef TRACE_EVENT |
270 | #define TRACE_EVENT(call, proto, args, tstruct, func, print) \ | 284 | #define TRACE_EVENT(call, proto, args, tstruct, func, print) \ |
@@ -289,6 +303,43 @@ ftrace_define_fields_##call(void) \ | |||
289 | #include TRACE_INCLUDE(TRACE_INCLUDE_FILE) | 303 | #include TRACE_INCLUDE(TRACE_INCLUDE_FILE) |
290 | 304 | ||
291 | /* | 305 | /* |
306 | * remember the offset of each array from the beginning of the event. | ||
307 | */ | ||
308 | |||
309 | #undef __entry | ||
310 | #define __entry entry | ||
311 | |||
312 | #undef __field | ||
313 | #define __field(type, item) | ||
314 | |||
315 | #undef __array | ||
316 | #define __array(type, item, len) | ||
317 | |||
318 | #undef __dynamic_array | ||
319 | #define __dynamic_array(type, item, len) \ | ||
320 | __data_offsets->item = __data_size + \ | ||
321 | offsetof(typeof(*entry), __data); \ | ||
322 | __data_size += (len) * sizeof(type); | ||
323 | |||
324 | #undef __string | ||
325 | #define __string(item, src) __dynamic_array(char, item, strlen(src) + 1) \ | ||
326 | |||
327 | #undef TRACE_EVENT | ||
328 | #define TRACE_EVENT(call, proto, args, tstruct, assign, print) \ | ||
329 | static inline int ftrace_get_offsets_##call( \ | ||
330 | struct ftrace_data_offsets_##call *__data_offsets, proto) \ | ||
331 | { \ | ||
332 | int __data_size = 0; \ | ||
333 | struct ftrace_raw_##call __maybe_unused *entry; \ | ||
334 | \ | ||
335 | tstruct; \ | ||
336 | \ | ||
337 | return __data_size; \ | ||
338 | } | ||
339 | |||
340 | #include TRACE_INCLUDE(TRACE_INCLUDE_FILE) | ||
341 | |||
342 | /* | ||
292 | * Stage 4 of the trace events. | 343 | * Stage 4 of the trace events. |
293 | * | 344 | * |
294 | * Override the macros in <trace/trace_events.h> to include the following: | 345 | * Override the macros in <trace/trace_events.h> to include the following: |
@@ -432,15 +483,15 @@ static void ftrace_profile_disable_##call(struct ftrace_event_call *event_call)\ | |||
432 | #undef __array | 483 | #undef __array |
433 | #define __array(type, item, len) | 484 | #define __array(type, item, len) |
434 | 485 | ||
486 | #undef __dynamic_array | ||
487 | #define __dynamic_array(type, item, len) \ | ||
488 | __entry->__data_loc_##item = __data_offsets.item; | ||
489 | |||
435 | #undef __string | 490 | #undef __string |
436 | #define __string(item, src) \ | 491 | #define __string(item, src) __dynamic_array(char, item, -1) \ |
437 | __str_offsets.item = __str_size + \ | ||
438 | offsetof(typeof(*entry), __str_data); \ | ||
439 | __str_size += strlen(src) + 1; | ||
440 | 492 | ||
441 | #undef __assign_str | 493 | #undef __assign_str |
442 | #define __assign_str(dst, src) \ | 494 | #define __assign_str(dst, src) \ |
443 | __entry->__str_loc_##dst = __str_offsets.dst; \ | ||
444 | strcpy(__get_str(dst), src); | 495 | strcpy(__get_str(dst), src); |
445 | 496 | ||
446 | #undef TRACE_EVENT | 497 | #undef TRACE_EVENT |
@@ -451,27 +502,30 @@ static struct ftrace_event_call event_##call; \ | |||
451 | \ | 502 | \ |
452 | static void ftrace_raw_event_##call(proto) \ | 503 | static void ftrace_raw_event_##call(proto) \ |
453 | { \ | 504 | { \ |
454 | struct ftrace_str_offsets_##call __maybe_unused __str_offsets; \ | 505 | struct ftrace_data_offsets_##call __maybe_unused __data_offsets;\ |
455 | struct ftrace_event_call *event_call = &event_##call; \ | 506 | struct ftrace_event_call *event_call = &event_##call; \ |
456 | struct ring_buffer_event *event; \ | 507 | struct ring_buffer_event *event; \ |
457 | struct ftrace_raw_##call *entry; \ | 508 | struct ftrace_raw_##call *entry; \ |
458 | unsigned long irq_flags; \ | 509 | unsigned long irq_flags; \ |
459 | int __str_size = 0; \ | 510 | int __data_size; \ |
460 | int pc; \ | 511 | int pc; \ |
461 | \ | 512 | \ |
462 | local_save_flags(irq_flags); \ | 513 | local_save_flags(irq_flags); \ |
463 | pc = preempt_count(); \ | 514 | pc = preempt_count(); \ |
464 | \ | 515 | \ |
465 | tstruct; \ | 516 | __data_size = ftrace_get_offsets_##call(&__data_offsets, args); \ |
466 | \ | 517 | \ |
467 | event = trace_current_buffer_lock_reserve(event_##call.id, \ | 518 | event = trace_current_buffer_lock_reserve(event_##call.id, \ |
468 | sizeof(struct ftrace_raw_##call) + __str_size,\ | 519 | sizeof(*entry) + __data_size, \ |
469 | irq_flags, pc); \ | 520 | irq_flags, pc); \ |
470 | if (!event) \ | 521 | if (!event) \ |
471 | return; \ | 522 | return; \ |
472 | entry = ring_buffer_event_data(event); \ | 523 | entry = ring_buffer_event_data(event); \ |
473 | \ | 524 | \ |
474 | assign; \ | 525 | \ |
526 | tstruct \ | ||
527 | \ | ||
528 | { assign; } \ | ||
475 | \ | 529 | \ |
476 | if (!filter_current_check_discard(event_call, entry, event)) \ | 530 | if (!filter_current_check_discard(event_call, entry, event)) \ |
477 | trace_nowake_buffer_unlock_commit(event, irq_flags, pc); \ | 531 | trace_nowake_buffer_unlock_commit(event, irq_flags, pc); \ |
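
Taken together, the ftrace.h changes replace the old string-only `__str_loc` scheme with a general dynamic-array layout: a per-event offsets struct, a generated ftrace_get_offsets_<call>() helper that sizes the dynamic tail and records each field's offset, a ring-buffer reservation of `sizeof(*entry) + __data_size`, and a `tstruct` pass that stores the offsets before `assign` runs, so `__assign_str()` can copy straight into `__get_str(dst)`. The following stand-alone sketch mimics roughly what the generated code amounts to for one event with a single `__string()` field; every name in it is illustrative and the ring buffer is faked with a static array:

```c
#include <stddef.h>
#include <stdio.h>
#include <string.h>

/* Illustrative stand-ins for the macro-generated code, not the real output. */
struct trace_entry { unsigned short type; };

struct ftrace_raw_foo {                 /* stage 1: fixed part of the record */
	struct trace_entry ent;
	unsigned short __data_loc_name; /* from __string(name, ...) */
	char __data[0];                 /* dynamic tail */
};

struct ftrace_data_offsets_foo {        /* stage 2: one offset per dynamic field */
	int name;
};

/* new stage: size the dynamic tail and remember each field's offset */
static inline int ftrace_get_offsets_foo(struct ftrace_data_offsets_foo *off,
					 const char *name)
{
	int __data_size = 0;

	/* __string(name, name) => __dynamic_array(char, name, strlen(name) + 1) */
	off->name = __data_size + offsetof(struct ftrace_raw_foo, __data);
	__data_size += (strlen(name) + 1) * sizeof(char);
	return __data_size;
}

static char ring_buffer[256];           /* toy stand-in for the trace ring buffer */

/* stage 4: reserve sizeof(*entry) + __data_size, store the offsets, then assign */
static void ftrace_raw_event_foo(const char *name)
{
	struct ftrace_data_offsets_foo __data_offsets;
	struct ftrace_raw_foo *entry = (struct ftrace_raw_foo *)ring_buffer;
	int __data_size = ftrace_get_offsets_foo(&__data_offsets, name);

	(void)__data_size;                            /* would size the reservation */
	entry->__data_loc_name = __data_offsets.name; /* done by the tstruct pass */
	strcpy((char *)entry + entry->__data_loc_name, name); /* __assign_str() */
}

int main(void)
{
	struct ftrace_raw_foo *entry = (struct ftrace_raw_foo *)ring_buffer;

	ftrace_raw_event_foo("ksoftirqd/0");
	printf("%s\n", (char *)entry + entry->__data_loc_name); /* __get_str(name) */
	return 0;
}
```
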
diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig
index a508b9d2adb8..4a13e5a01ce3 100644
--- a/kernel/trace/Kconfig
+++ b/kernel/trace/Kconfig
@@ -56,6 +56,13 @@ config CONTEXT_SWITCH_TRACER | |||
56 | select MARKERS | 56 | select MARKERS |
57 | bool | 57 | bool |
58 | 58 | ||
59 | # All tracer options should select GENERIC_TRACER. For those options that are | ||
60 | # enabled by all tracers (context switch and event tracer) they select TRACING. | ||
61 | # This allows those options to appear when no other tracer is selected. But the | ||
62 | # options do not appear when something else selects it. We need the two options | ||
63 | # GENERIC_TRACER and TRACING to avoid circular dependencies to accomplish the | ||
64 | # hidding of the automatic options options. | ||
65 | |||
59 | config TRACING | 66 | config TRACING |
60 | bool | 67 | bool |
61 | select DEBUG_FS | 68 | select DEBUG_FS |
@@ -66,6 +73,10 @@ config TRACING | |||
66 | select BINARY_PRINTF | 73 | select BINARY_PRINTF |
67 | select EVENT_TRACING | 74 | select EVENT_TRACING |
68 | 75 | ||
76 | config GENERIC_TRACER | ||
77 | bool | ||
78 | select TRACING | ||
79 | |||
69 | # | 80 | # |
70 | # Minimum requirements an architecture has to meet for us to | 81 | # Minimum requirements an architecture has to meet for us to |
71 | # be able to offer generic tracing facilities: | 82 | # be able to offer generic tracing facilities: |
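
The comment above spells out the intent of the split: individual tracers now select the new GENERIC_TRACER symbol (which in turn selects TRACING), while ENABLE_DEFAULT_TRACERS depends on !GENERIC_TRACER so it is only offered when no real tracer is configured. A hypothetical tracer added after this patch would follow the same pattern (sketch only, not part of this merge):

```
config EXAMPLE_TRACER
	bool "Example tracer"
	select GENERIC_TRACER
	help
	  Illustrative entry only: selecting GENERIC_TRACER pulls in TRACING
	  and hides ENABLE_DEFAULT_TRACERS, which depends on !GENERIC_TRACER.
```
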
@@ -95,7 +106,7 @@ config FUNCTION_TRACER | |||
95 | depends on HAVE_FUNCTION_TRACER | 106 | depends on HAVE_FUNCTION_TRACER |
96 | select FRAME_POINTER | 107 | select FRAME_POINTER |
97 | select KALLSYMS | 108 | select KALLSYMS |
98 | select TRACING | 109 | select GENERIC_TRACER |
99 | select CONTEXT_SWITCH_TRACER | 110 | select CONTEXT_SWITCH_TRACER |
100 | help | 111 | help |
101 | Enable the kernel to trace every kernel function. This is done | 112 | Enable the kernel to trace every kernel function. This is done |
@@ -126,7 +137,7 @@ config IRQSOFF_TRACER | |||
126 | depends on TRACE_IRQFLAGS_SUPPORT | 137 | depends on TRACE_IRQFLAGS_SUPPORT |
127 | depends on GENERIC_TIME | 138 | depends on GENERIC_TIME |
128 | select TRACE_IRQFLAGS | 139 | select TRACE_IRQFLAGS |
129 | select TRACING | 140 | select GENERIC_TRACER |
130 | select TRACER_MAX_TRACE | 141 | select TRACER_MAX_TRACE |
131 | help | 142 | help |
132 | This option measures the time spent in irqs-off critical | 143 | This option measures the time spent in irqs-off critical |
@@ -147,7 +158,7 @@ config PREEMPT_TRACER | |||
147 | default n | 158 | default n |
148 | depends on GENERIC_TIME | 159 | depends on GENERIC_TIME |
149 | depends on PREEMPT | 160 | depends on PREEMPT |
150 | select TRACING | 161 | select GENERIC_TRACER |
151 | select TRACER_MAX_TRACE | 162 | select TRACER_MAX_TRACE |
152 | help | 163 | help |
153 | This option measures the time spent in preemption off critical | 164 | This option measures the time spent in preemption off critical |
@@ -166,7 +177,7 @@ config PREEMPT_TRACER | |||
166 | config SYSPROF_TRACER | 177 | config SYSPROF_TRACER |
167 | bool "Sysprof Tracer" | 178 | bool "Sysprof Tracer" |
168 | depends on X86 | 179 | depends on X86 |
169 | select TRACING | 180 | select GENERIC_TRACER |
170 | select CONTEXT_SWITCH_TRACER | 181 | select CONTEXT_SWITCH_TRACER |
171 | help | 182 | help |
172 | This tracer provides the trace needed by the 'Sysprof' userspace | 183 | This tracer provides the trace needed by the 'Sysprof' userspace |
@@ -174,44 +185,33 @@ config SYSPROF_TRACER | |||
174 | 185 | ||
175 | config SCHED_TRACER | 186 | config SCHED_TRACER |
176 | bool "Scheduling Latency Tracer" | 187 | bool "Scheduling Latency Tracer" |
177 | select TRACING | 188 | select GENERIC_TRACER |
178 | select CONTEXT_SWITCH_TRACER | 189 | select CONTEXT_SWITCH_TRACER |
179 | select TRACER_MAX_TRACE | 190 | select TRACER_MAX_TRACE |
180 | help | 191 | help |
181 | This tracer tracks the latency of the highest priority task | 192 | This tracer tracks the latency of the highest priority task |
182 | to be scheduled in, starting from the point it has woken up. | 193 | to be scheduled in, starting from the point it has woken up. |
183 | 194 | ||
184 | config ENABLE_CONTEXT_SWITCH_TRACER | 195 | config ENABLE_DEFAULT_TRACERS |
185 | bool "Trace process context switches" | 196 | bool "Trace process context switches and events" |
186 | select TRACING | 197 | depends on !GENERIC_TRACER |
187 | select CONTEXT_SWITCH_TRACER | ||
188 | help | ||
189 | This tracer gets called from the context switch and records | ||
190 | all switching of tasks. | ||
191 | |||
192 | config ENABLE_EVENT_TRACING | ||
193 | bool "Trace various events in the kernel" | ||
194 | select TRACING | 198 | select TRACING |
195 | help | 199 | help |
196 | This tracer hooks to various trace points in the kernel | 200 | This tracer hooks to various trace points in the kernel |
197 | allowing the user to pick and choose which trace point they | 201 | allowing the user to pick and choose which trace point they |
198 | want to trace. | 202 | want to trace. It also includes the sched_switch tracer plugin. |
199 | |||
200 | Note, all tracers enable event tracing. This option is | ||
201 | only a convenience to enable event tracing when no other | ||
202 | tracers are selected. | ||
203 | 203 | ||
204 | config FTRACE_SYSCALLS | 204 | config FTRACE_SYSCALLS |
205 | bool "Trace syscalls" | 205 | bool "Trace syscalls" |
206 | depends on HAVE_FTRACE_SYSCALLS | 206 | depends on HAVE_FTRACE_SYSCALLS |
207 | select TRACING | 207 | select GENERIC_TRACER |
208 | select KALLSYMS | 208 | select KALLSYMS |
209 | help | 209 | help |
210 | Basic tracer to catch the syscall entry and exit events. | 210 | Basic tracer to catch the syscall entry and exit events. |
211 | 211 | ||
212 | config BOOT_TRACER | 212 | config BOOT_TRACER |
213 | bool "Trace boot initcalls" | 213 | bool "Trace boot initcalls" |
214 | select TRACING | 214 | select GENERIC_TRACER |
215 | select CONTEXT_SWITCH_TRACER | 215 | select CONTEXT_SWITCH_TRACER |
216 | help | 216 | help |
217 | This tracer helps developers to optimize boot times: it records | 217 | This tracer helps developers to optimize boot times: it records |
@@ -228,7 +228,7 @@ config BOOT_TRACER | |||
228 | 228 | ||
229 | config TRACE_BRANCH_PROFILING | 229 | config TRACE_BRANCH_PROFILING |
230 | bool | 230 | bool |
231 | select TRACING | 231 | select GENERIC_TRACER |
232 | 232 | ||
233 | choice | 233 | choice |
234 | prompt "Branch Profiling" | 234 | prompt "Branch Profiling" |
@@ -308,7 +308,7 @@ config BRANCH_TRACER | |||
308 | config POWER_TRACER | 308 | config POWER_TRACER |
309 | bool "Trace power consumption behavior" | 309 | bool "Trace power consumption behavior" |
310 | depends on X86 | 310 | depends on X86 |
311 | select TRACING | 311 | select GENERIC_TRACER |
312 | help | 312 | help |
313 | This tracer helps developers to analyze and optimize the kernels | 313 | This tracer helps developers to analyze and optimize the kernels |
314 | power management decisions, specifically the C-state and P-state | 314 | power management decisions, specifically the C-state and P-state |
@@ -342,14 +342,14 @@ config STACK_TRACER | |||
342 | config HW_BRANCH_TRACER | 342 | config HW_BRANCH_TRACER |
343 | depends on HAVE_HW_BRANCH_TRACER | 343 | depends on HAVE_HW_BRANCH_TRACER |
344 | bool "Trace hw branches" | 344 | bool "Trace hw branches" |
345 | select TRACING | 345 | select GENERIC_TRACER |
346 | help | 346 | help |
347 | This tracer records all branches on the system in a circular | 347 | This tracer records all branches on the system in a circular |
348 | buffer giving access to the last N branches for each cpu. | 348 | buffer giving access to the last N branches for each cpu. |
349 | 349 | ||
350 | config KMEMTRACE | 350 | config KMEMTRACE |
351 | bool "Trace SLAB allocations" | 351 | bool "Trace SLAB allocations" |
352 | select TRACING | 352 | select GENERIC_TRACER |
353 | help | 353 | help |
354 | kmemtrace provides tracing for slab allocator functions, such as | 354 | kmemtrace provides tracing for slab allocator functions, such as |
355 | kmalloc, kfree, kmem_cache_alloc, kmem_cache_free etc.. Collected | 355 | kmalloc, kfree, kmem_cache_alloc, kmem_cache_free etc.. Collected |
@@ -369,7 +369,7 @@ config KMEMTRACE | |||
369 | 369 | ||
370 | config WORKQUEUE_TRACER | 370 | config WORKQUEUE_TRACER |
371 | bool "Trace workqueues" | 371 | bool "Trace workqueues" |
372 | select TRACING | 372 | select GENERIC_TRACER |
373 | help | 373 | help |
374 | The workqueue tracer provides some statistical informations | 374 | The workqueue tracer provides some statistical informations |
375 | about each cpu workqueue thread such as the number of the | 375 | about each cpu workqueue thread such as the number of the |
@@ -385,7 +385,7 @@ config BLK_DEV_IO_TRACE | |||
385 | select RELAY | 385 | select RELAY |
386 | select DEBUG_FS | 386 | select DEBUG_FS |
387 | select TRACEPOINTS | 387 | select TRACEPOINTS |
388 | select TRACING | 388 | select GENERIC_TRACER |
389 | select STACKTRACE | 389 | select STACKTRACE |
390 | help | 390 | help |
391 | Say Y here if you want to be able to trace the block layer actions | 391 | Say Y here if you want to be able to trace the block layer actions |
@@ -446,7 +446,7 @@ config FTRACE_SELFTEST | |||
446 | 446 | ||
447 | config FTRACE_STARTUP_TEST | 447 | config FTRACE_STARTUP_TEST |
448 | bool "Perform a startup test on ftrace" | 448 | bool "Perform a startup test on ftrace" |
449 | depends on TRACING | 449 | depends on GENERIC_TRACER |
450 | select FTRACE_SELFTEST | 450 | select FTRACE_SELFTEST |
451 | help | 451 | help |
452 | This option performs a series of startup tests on ftrace. On bootup | 452 | This option performs a series of startup tests on ftrace. On bootup |
@@ -457,7 +457,7 @@ config FTRACE_STARTUP_TEST | |||
457 | config MMIOTRACE | 457 | config MMIOTRACE |
458 | bool "Memory mapped IO tracing" | 458 | bool "Memory mapped IO tracing" |
459 | depends on HAVE_MMIOTRACE_SUPPORT && PCI | 459 | depends on HAVE_MMIOTRACE_SUPPORT && PCI |
460 | select TRACING | 460 | select GENERIC_TRACER |
461 | help | 461 | help |
462 | Mmiotrace traces Memory Mapped I/O access and is meant for | 462 | Mmiotrace traces Memory Mapped I/O access and is meant for |
463 | debugging and reverse engineering. It is called from the ioremap | 463 | debugging and reverse engineering. It is called from the ioremap |
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 140699a9a8a7..d6973dfadb36 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -32,6 +32,7 @@ | |||
32 | #include <trace/events/sched.h> | 32 | #include <trace/events/sched.h> |
33 | 33 | ||
34 | #include <asm/ftrace.h> | 34 | #include <asm/ftrace.h> |
35 | #include <asm/setup.h> | ||
35 | 36 | ||
36 | #include "trace_output.h" | 37 | #include "trace_output.h" |
37 | #include "trace_stat.h" | 38 | #include "trace_stat.h" |
@@ -598,7 +599,7 @@ function_profile_call(unsigned long ip, unsigned long parent_ip) | |||
598 | local_irq_save(flags); | 599 | local_irq_save(flags); |
599 | 600 | ||
600 | stat = &__get_cpu_var(ftrace_profile_stats); | 601 | stat = &__get_cpu_var(ftrace_profile_stats); |
601 | if (!stat->hash) | 602 | if (!stat->hash || !ftrace_profile_enabled) |
602 | goto out; | 603 | goto out; |
603 | 604 | ||
604 | rec = ftrace_find_profiled_func(stat, ip); | 605 | rec = ftrace_find_profiled_func(stat, ip); |
@@ -629,7 +630,7 @@ static void profile_graph_return(struct ftrace_graph_ret *trace) | |||
629 | 630 | ||
630 | local_irq_save(flags); | 631 | local_irq_save(flags); |
631 | stat = &__get_cpu_var(ftrace_profile_stats); | 632 | stat = &__get_cpu_var(ftrace_profile_stats); |
632 | if (!stat->hash) | 633 | if (!stat->hash || !ftrace_profile_enabled) |
633 | goto out; | 634 | goto out; |
634 | 635 | ||
635 | calltime = trace->rettime - trace->calltime; | 636 | calltime = trace->rettime - trace->calltime; |
@@ -723,6 +724,10 @@ ftrace_profile_write(struct file *filp, const char __user *ubuf, | |||
723 | ftrace_profile_enabled = 1; | 724 | ftrace_profile_enabled = 1; |
724 | } else { | 725 | } else { |
725 | ftrace_profile_enabled = 0; | 726 | ftrace_profile_enabled = 0; |
727 | /* | ||
728 | * unregister_ftrace_profiler calls stop_machine | ||
729 | * so this acts like an synchronize_sched. | ||
730 | */ | ||
726 | unregister_ftrace_profiler(); | 731 | unregister_ftrace_profiler(); |
727 | } | 732 | } |
728 | } | 733 | } |
@@ -2369,6 +2374,45 @@ void ftrace_set_notrace(unsigned char *buf, int len, int reset) | |||
2369 | ftrace_set_regex(buf, len, reset, 0); | 2374 | ftrace_set_regex(buf, len, reset, 0); |
2370 | } | 2375 | } |
2371 | 2376 | ||
2377 | /* | ||
2378 | * command line interface to allow users to set filters on boot up. | ||
2379 | */ | ||
2380 | #define FTRACE_FILTER_SIZE COMMAND_LINE_SIZE | ||
2381 | static char ftrace_notrace_buf[FTRACE_FILTER_SIZE] __initdata; | ||
2382 | static char ftrace_filter_buf[FTRACE_FILTER_SIZE] __initdata; | ||
2383 | |||
2384 | static int __init set_ftrace_notrace(char *str) | ||
2385 | { | ||
2386 | strncpy(ftrace_notrace_buf, str, FTRACE_FILTER_SIZE); | ||
2387 | return 1; | ||
2388 | } | ||
2389 | __setup("ftrace_notrace=", set_ftrace_notrace); | ||
2390 | |||
2391 | static int __init set_ftrace_filter(char *str) | ||
2392 | { | ||
2393 | strncpy(ftrace_filter_buf, str, FTRACE_FILTER_SIZE); | ||
2394 | return 1; | ||
2395 | } | ||
2396 | __setup("ftrace_filter=", set_ftrace_filter); | ||
2397 | |||
2398 | static void __init set_ftrace_early_filter(char *buf, int enable) | ||
2399 | { | ||
2400 | char *func; | ||
2401 | |||
2402 | while (buf) { | ||
2403 | func = strsep(&buf, ","); | ||
2404 | ftrace_set_regex(func, strlen(func), 0, enable); | ||
2405 | } | ||
2406 | } | ||
2407 | |||
2408 | static void __init set_ftrace_early_filters(void) | ||
2409 | { | ||
2410 | if (ftrace_filter_buf[0]) | ||
2411 | set_ftrace_early_filter(ftrace_filter_buf, 1); | ||
2412 | if (ftrace_notrace_buf[0]) | ||
2413 | set_ftrace_early_filter(ftrace_notrace_buf, 0); | ||
2414 | } | ||
2415 | |||
2372 | static int | 2416 | static int |
2373 | ftrace_regex_release(struct inode *inode, struct file *file, int enable) | 2417 | ftrace_regex_release(struct inode *inode, struct file *file, int enable) |
2374 | { | 2418 | { |
@@ -2829,6 +2873,8 @@ void __init ftrace_init(void) | |||
2829 | if (ret) | 2873 | if (ret) |
2830 | pr_warning("Failed to register trace ftrace module notifier\n"); | 2874 | pr_warning("Failed to register trace ftrace module notifier\n"); |
2831 | 2875 | ||
2876 | set_ftrace_early_filters(); | ||
2877 | |||
2832 | return; | 2878 | return; |
2833 | failed: | 2879 | failed: |
2834 | ftrace_disabled = 1; | 2880 | ftrace_disabled = 1; |
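
The new boot-time plumbing copies the raw `ftrace_filter=`/`ftrace_notrace=` strings into __initdata buffers and splits them with strsep() once ftrace_init() calls set_ftrace_early_filters(): each strsep() call returns the next comma-delimited token and advances the buffer pointer, which becomes NULL after the last token and ends the loop. A stand-alone user-space illustration of that parsing pattern (the function names are illustrative):

```c
#define _DEFAULT_SOURCE		/* for strsep() on glibc */
#include <stdio.h>
#include <string.h>

int main(void)
{
	/* e.g. the value passed as ftrace_filter=schedule,vfs_read,do_IRQ */
	char cmdline[] = "schedule,vfs_read,do_IRQ";
	char *buf = cmdline;
	char *func;

	while (buf) {
		func = strsep(&buf, ",");
		printf("would apply filter to: %s\n", func);
	}
	return 0;	/* after the last token strsep() sets buf to NULL */
}
```
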
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index a3a8a87d7e91..cae34c69752f 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -2826,6 +2826,9 @@ static int tracing_open_pipe(struct inode *inode, struct file *filp) | |||
2826 | /* trace pipe does not show start of buffer */ | 2826 | /* trace pipe does not show start of buffer */ |
2827 | cpumask_setall(iter->started); | 2827 | cpumask_setall(iter->started); |
2828 | 2828 | ||
2829 | if (trace_flags & TRACE_ITER_LATENCY_FMT) | ||
2830 | iter->iter_flags |= TRACE_FILE_LAT_FMT; | ||
2831 | |||
2829 | iter->cpu_file = cpu_file; | 2832 | iter->cpu_file = cpu_file; |
2830 | iter->tr = &global_trace; | 2833 | iter->tr = &global_trace; |
2831 | mutex_init(&iter->mutex); | 2834 | mutex_init(&iter->mutex); |
diff --git a/kernel/trace/trace_events_filter.c b/kernel/trace/trace_events_filter.c
index a7430b16d243..db6e54bdb596 100644
--- a/kernel/trace/trace_events_filter.c
+++ b/kernel/trace/trace_events_filter.c
@@ -478,12 +478,12 @@ enum { | |||
478 | 478 | ||
479 | static int is_string_field(const char *type) | 479 | static int is_string_field(const char *type) |
480 | { | 480 | { |
481 | if (strstr(type, "__data_loc") && strstr(type, "char")) | ||
482 | return FILTER_DYN_STRING; | ||
483 | |||
481 | if (strchr(type, '[') && strstr(type, "char")) | 484 | if (strchr(type, '[') && strstr(type, "char")) |
482 | return FILTER_STATIC_STRING; | 485 | return FILTER_STATIC_STRING; |
483 | 486 | ||
484 | if (!strcmp(type, "__str_loc")) | ||
485 | return FILTER_DYN_STRING; | ||
486 | |||
487 | return 0; | 487 | return 0; |
488 | } | 488 | } |
489 | 489 | ||
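
The filter change keys off the type string that the new `__dynamic_array` field definition emits ("__data_loc" "[" #type "]", i.e. `__data_loc[char]` for a `__string()` field). Note that the dynamic check has to come first, because `__data_loc[char]` would also satisfy the static `char[...]` test. A small mirror of the updated classification (the FILTER_* values here are illustrative):

```c
#include <stdio.h>
#include <string.h>

#define FILTER_STATIC_STRING	1	/* illustrative values */
#define FILTER_DYN_STRING	2

/* Mirror of the updated check; the order matters, since "__data_loc[char]"
 * would also match the static char[...] branch. */
static int is_string_field(const char *type)
{
	if (strstr(type, "__data_loc") && strstr(type, "char"))
		return FILTER_DYN_STRING;
	if (strchr(type, '[') && strstr(type, "char"))
		return FILTER_STATIC_STRING;
	return 0;
}

int main(void)
{
	printf("%d\n", is_string_field("__data_loc[char]"));	/* 2: dynamic string */
	printf("%d\n", is_string_field("char[16]"));		/* 1: static string */
	printf("%d\n", is_string_field("unsigned long"));	/* 0: not a string */
	return 0;
}
```
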
diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c
index c12d95db2f56..0fe3b223f7ed 100644
--- a/kernel/trace/trace_output.c
+++ b/kernel/trace/trace_output.c
@@ -17,6 +17,7 @@ | |||
17 | static DECLARE_RWSEM(trace_event_mutex); | 17 | static DECLARE_RWSEM(trace_event_mutex); |
18 | 18 | ||
19 | DEFINE_PER_CPU(struct trace_seq, ftrace_event_seq); | 19 | DEFINE_PER_CPU(struct trace_seq, ftrace_event_seq); |
20 | EXPORT_PER_CPU_SYMBOL(ftrace_event_seq); | ||
20 | 21 | ||
21 | static struct hlist_head event_hash[EVENT_HASHSIZE] __read_mostly; | 22 | static struct hlist_head event_hash[EVENT_HASHSIZE] __read_mostly; |
22 | 23 | ||
@@ -250,6 +251,7 @@ ftrace_print_flags_seq(struct trace_seq *p, const char *delim, | |||
250 | 251 | ||
251 | return p->buffer; | 252 | return p->buffer; |
252 | } | 253 | } |
254 | EXPORT_SYMBOL(ftrace_print_flags_seq); | ||
253 | 255 | ||
254 | const char * | 256 | const char * |
255 | ftrace_print_symbols_seq(struct trace_seq *p, unsigned long val, | 257 | ftrace_print_symbols_seq(struct trace_seq *p, unsigned long val, |
@@ -275,6 +277,7 @@ ftrace_print_symbols_seq(struct trace_seq *p, unsigned long val, | |||
275 | 277 | ||
276 | return p->buffer; | 278 | return p->buffer; |
277 | } | 279 | } |
280 | EXPORT_SYMBOL(ftrace_print_symbols_seq); | ||
278 | 281 | ||
279 | #ifdef CONFIG_KRETPROBES | 282 | #ifdef CONFIG_KRETPROBES |
280 | static inline const char *kretprobed(const char *name) | 283 | static inline const char *kretprobed(const char *name) |