author		Frederic Weisbecker <fweisbec@gmail.com>	2009-03-06 11:21:49 -0500
committer	Ingo Molnar <mingo@elte.hu>			2009-03-06 11:59:12 -0500
commit		769b0441f438c4bb4872cb8560eb6fe51bcc09ee (patch)
tree		9908682dfd89e97c3097a7c3adcae35d821e1895 /kernel/trace
parent		1ba28e02a18cbdbea123836f6c98efb09cbf59ec (diff)
tracing/core: drop the old trace_printk() implementation in favour of trace_bprintk()
Impact: faster and lighter tracing
Now that we have trace_bprintk(), which is faster, consumes less memory than
trace_printk() and serves the same purpose, we can drop the old implementation
in favour of the binary one: the whole implementation of trace_bprintk() moves
into trace_printk(). The API therefore doesn't change, except that TRACE_PRINT
entries must now be printed with trace_seq_bprintk(). A condensed sketch of the
resulting record layout and read path follows.
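(Illustrative only; pieced together from the hunks further down, not a literal
copy of the patched code.)

  /* What a TRACE_PRINT record now carries (see kernel/trace/trace.h below): */
  struct print_entry {
  	struct trace_entry	ent;
  	unsigned long		ip;	/* call site of trace_printk() */
  	int			depth;
  	const char		*fmt;	/* pointer to the persistent format string */
  	u32			buf[];	/* arguments packed by vbin_printf() */
  };

  /* Write side (trace_vprintk): pack the varargs, not the formatted string. */
  len = vbin_printf(trace_buf, TRACE_BUF_SIZE, fmt, args);

  /* Read side: expand the packed arguments against the saved format. */
  ret = trace_seq_bprintf(s, field->fmt, field->buf);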
Some changes result from this:

- Previously, trace_bprintk() depended on a single tracer and couldn't work
  without it. This tracer has been dropped, and the whole implementation of
  trace_printk() (including the module format management) is now integrated
  into the tracing core (it comes with CONFIG_TRACING). The module format
  handling stays in trace_printk.c (previously trace_bprintk.c), so trace.c
  doesn't get bloated.

- Some output paths are changed to use trace_seq_bprintk() to print
  TRACE_PRINT entries.

- The trace_printk()/trace_vprintk() macros are adjusted slightly to support
  non-builtin constant formats and to fix 'const' qualifier warnings, but this
  is all transparent to developers (a trivial caller-side sketch follows this
  list).

- etc...
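A trivial caller-side sketch, only to illustrate that the API stays the same
(the function below is hypothetical, not part of this patch):

  static void my_driver_poll(void)	/* hypothetical example caller */
  {
  	int budget = 64;

  	/* same call as before this patch; only the ring-buffer encoding
  	 * of the event changes under the hood */
  	trace_printk("napi poll, budget=%d\n", budget);
  }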
V2:
- Rebase against latest changes
- Fix misspellings in the changelog
V3:
- Rebase against latest changes (trace_printk() moved to kernel.h)
Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>
Acked-by: Steven Rostedt <rostedt@goodmis.org>
LKML-Reference: <1236356510-8381-5-git-send-email-fweisbec@gmail.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel/trace')
-rw-r--r--	kernel/trace/Kconfig						7
-rw-r--r--	kernel/trace/Makefile						2
-rw-r--r--	kernel/trace/trace.c						212
-rw-r--r--	kernel/trace/trace.h						14
-rw-r--r--	kernel/trace/trace_functions_graph.c				6
-rw-r--r--	kernel/trace/trace_mmiotrace.c					9
-rw-r--r--	kernel/trace/trace_output.c					70
-rw-r--r--	kernel/trace/trace_output.h					2
-rw-r--r--	kernel/trace/trace_printk.c (renamed from kernel/trace/trace_bprintk.c)	84
9 files changed, 126 insertions, 280 deletions
diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig
index ad8d3617d0a6..8e4a2a61cd75 100644
--- a/kernel/trace/Kconfig
+++ b/kernel/trace/Kconfig
@@ -52,6 +52,7 @@ config TRACING
 	select STACKTRACE if STACKTRACE_SUPPORT
 	select TRACEPOINTS
 	select NOP_TRACER
+	select BINARY_PRINTF
 
 #
 # Minimum requirements an architecture has to meet for us to
@@ -97,12 +98,6 @@ config FUNCTION_GRAPH_TRACER
 	  This is done by setting the current return address on the current
 	  task structure into a stack of calls.
 
-config TRACE_BPRINTK
-	bool "Binary printk for tracing"
-	default y
-	depends on TRACING
-	select BINARY_PRINTF
-
 config IRQSOFF_TRACER
 	bool "Interrupts-off Latency Tracer"
 	default n
diff --git a/kernel/trace/Makefile b/kernel/trace/Makefile
index 46557ef4c379..c7a2943796eb 100644
--- a/kernel/trace/Makefile
+++ b/kernel/trace/Makefile
@@ -22,7 +22,7 @@ obj-$(CONFIG_TRACING) += trace.o
 obj-$(CONFIG_TRACING) += trace_clock.o
 obj-$(CONFIG_TRACING) += trace_output.o
 obj-$(CONFIG_TRACING) += trace_stat.o
-obj-$(CONFIG_TRACE_BPRINTK) += trace_bprintk.o
+obj-$(CONFIG_TRACING) += trace_printk.o
 obj-$(CONFIG_CONTEXT_SWITCH_TRACER) += trace_sched_switch.o
 obj-$(CONFIG_SYSPROF_TRACER) += trace_sysprof.o
 obj-$(CONFIG_FUNCTION_TRACER) += trace_functions.o
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 46b3cd7a5752..cc94f8642485 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -1169,6 +1169,67 @@ void trace_graph_return(struct ftrace_graph_ret *trace)
 }
 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
 
+
+/**
+ * trace_vprintk - write binary msg to tracing buffer
+ *
+ */
+int trace_vprintk(unsigned long ip, int depth, const char *fmt, va_list args)
+{
+	static DEFINE_SPINLOCK(trace_buf_lock);
+	static u32 trace_buf[TRACE_BUF_SIZE];
+
+	struct ring_buffer_event *event;
+	struct trace_array *tr = &global_trace;
+	struct trace_array_cpu *data;
+	struct print_entry *entry;
+	unsigned long flags;
+	int resched;
+	int cpu, len = 0, size, pc;
+
+	if (unlikely(tracing_selftest_running || tracing_disabled))
+		return 0;
+
+	/* Don't pollute graph traces with trace_vprintk internals */
+	pause_graph_tracing();
+
+	pc = preempt_count();
+	resched = ftrace_preempt_disable();
+	cpu = raw_smp_processor_id();
+	data = tr->data[cpu];
+
+	if (unlikely(atomic_read(&data->disabled)))
+		goto out;
+
+	spin_lock_irqsave(&trace_buf_lock, flags);
+	len = vbin_printf(trace_buf, TRACE_BUF_SIZE, fmt, args);
+
+	if (len > TRACE_BUF_SIZE || len < 0)
+		goto out_unlock;
+
+	size = sizeof(*entry) + sizeof(u32) * len;
+	event = trace_buffer_lock_reserve(tr, TRACE_PRINT, size, flags, pc);
+	if (!event)
+		goto out_unlock;
+	entry = ring_buffer_event_data(event);
+	entry->ip = ip;
+	entry->depth = depth;
+	entry->fmt = fmt;
+
+	memcpy(entry->buf, trace_buf, sizeof(u32) * len);
+	ring_buffer_unlock_commit(tr->buffer, event);
+
+out_unlock:
+	spin_unlock_irqrestore(&trace_buf_lock, flags);
+
+out:
+	ftrace_preempt_enable(resched);
+	unpause_graph_tracing();
+
+	return len;
+}
+EXPORT_SYMBOL_GPL(trace_vprintk);
+
 enum trace_file_type {
 	TRACE_FILE_LAT_FMT = 1,
 	TRACE_FILE_ANNOTATE = 2,
@@ -1564,7 +1625,7 @@ static enum print_line_t print_printk_msg_only(struct trace_iterator *iter)
 
 	trace_assign_type(field, entry);
 
-	ret = trace_seq_printf(s, "%s", field->buf);
+	ret = trace_seq_bprintf(s, field->fmt, field->buf);
 	if (!ret)
 		return TRACE_TYPE_PARTIAL_LINE;
 
@@ -3714,155 +3775,6 @@ static __init int tracer_init_debugfs(void)
 	return 0;
 }
 
-int trace_vprintk(unsigned long ip, int depth, const char *fmt, va_list args)
-{
-	static raw_spinlock_t trace_buf_lock = __RAW_SPIN_LOCK_UNLOCKED;
-	static char trace_buf[TRACE_BUF_SIZE];
-
-	struct ring_buffer_event *event;
-	struct trace_array *tr = &global_trace;
-	struct trace_array_cpu *data;
-	int cpu, len = 0, size, pc;
-	struct print_entry *entry;
-	unsigned long irq_flags;
-
-	if (tracing_disabled || tracing_selftest_running)
-		return 0;
-
-	pc = preempt_count();
-	preempt_disable_notrace();
-	cpu = raw_smp_processor_id();
-	data = tr->data[cpu];
-
-	if (unlikely(atomic_read(&data->disabled)))
-		goto out;
-
-	pause_graph_tracing();
-	raw_local_irq_save(irq_flags);
-	__raw_spin_lock(&trace_buf_lock);
-	len = vsnprintf(trace_buf, TRACE_BUF_SIZE, fmt, args);
-
-	len = min(len, TRACE_BUF_SIZE-1);
-	trace_buf[len] = 0;
-
-	size = sizeof(*entry) + len + 1;
-	event = trace_buffer_lock_reserve(tr, TRACE_PRINT, size, irq_flags, pc);
-	if (!event)
-		goto out_unlock;
-	entry = ring_buffer_event_data(event);
-	entry->ip = ip;
-	entry->depth = depth;
-
-	memcpy(&entry->buf, trace_buf, len);
-	entry->buf[len] = 0;
-	ring_buffer_unlock_commit(tr->buffer, event);
-
-out_unlock:
-	__raw_spin_unlock(&trace_buf_lock);
-	raw_local_irq_restore(irq_flags);
-	unpause_graph_tracing();
-out:
-	preempt_enable_notrace();
-
-	return len;
-}
-EXPORT_SYMBOL_GPL(trace_vprintk);
-
-int __trace_printk(unsigned long ip, const char *fmt, ...)
-{
-	int ret;
-	va_list ap;
-
-	if (!(trace_flags & TRACE_ITER_PRINTK))
-		return 0;
-
-	va_start(ap, fmt);
-	ret = trace_vprintk(ip, task_curr_ret_stack(current), fmt, ap);
-	va_end(ap);
-	return ret;
-}
-EXPORT_SYMBOL_GPL(__trace_printk);
-
-int __ftrace_vprintk(unsigned long ip, const char *fmt, va_list ap)
-{
-	if (!(trace_flags & TRACE_ITER_PRINTK))
-		return 0;
-
-	return trace_vprintk(ip, task_curr_ret_stack(current), fmt, ap);
-}
-EXPORT_SYMBOL_GPL(__ftrace_vprintk);
-
-/**
- * trace_vbprintk - write binary msg to tracing buffer
- *
- * Caller must insure @fmt are valid when msg is in tracing buffer.
- */
-int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
-{
-	static DEFINE_SPINLOCK(trace_buf_lock);
-	static u32 trace_buf[TRACE_BUF_SIZE];
-
-	struct ring_buffer_event *event;
-	struct trace_array *tr = &global_trace;
-	struct trace_array_cpu *data;
-	struct bprintk_entry *entry;
-	unsigned long flags;
-	int resched;
-	int cpu, len = 0, size, pc;
-
-	if (tracing_disabled || !trace_bprintk_enable)
-		return 0;
-
-	pc = preempt_count();
-	resched = ftrace_preempt_disable();
-	cpu = raw_smp_processor_id();
-	data = tr->data[cpu];
-
-	if (unlikely(atomic_read(&data->disabled)))
-		goto out;
-
-	spin_lock_irqsave(&trace_buf_lock, flags);
-	len = vbin_printf(trace_buf, TRACE_BUF_SIZE, fmt, args);
-
-	if (len > TRACE_BUF_SIZE || len < 0)
-		goto out_unlock;
-
-	size = sizeof(*entry) + sizeof(u32) * len;
-	event = trace_buffer_lock_reserve(tr, TRACE_BPRINTK, size, flags, pc);
-	if (!event)
-		goto out_unlock;
-	entry = ring_buffer_event_data(event);
-	entry->ip = ip;
-	entry->fmt = fmt;
-
-	memcpy(entry->buf, trace_buf, sizeof(u32) * len);
-	ring_buffer_unlock_commit(tr->buffer, event);
-
-out_unlock:
-	spin_unlock_irqrestore(&trace_buf_lock, flags);
-
-out:
-	ftrace_preempt_enable(resched);
-
-	return len;
-}
-EXPORT_SYMBOL_GPL(trace_vbprintk);
-
-int __trace_bprintk(unsigned long ip, const char *fmt, ...)
-{
-	int ret;
-	va_list ap;
-
-	if (!fmt)
-		return 0;
-
-	va_start(ap, fmt);
-	ret = trace_vbprintk(ip, fmt, ap);
-	va_end(ap);
-	return ret;
-}
-EXPORT_SYMBOL_GPL(__trace_bprintk);
-
 static int trace_panic_handler(struct notifier_block *this,
 			       unsigned long event, void *unused)
 {
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index 0f5077f8f957..6140922392c8 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -20,7 +20,6 @@ enum trace_type {
 	TRACE_WAKE,
 	TRACE_STACK,
 	TRACE_PRINT,
-	TRACE_BPRINTK,
 	TRACE_SPECIAL,
 	TRACE_MMIO_RW,
 	TRACE_MMIO_MAP,
@@ -120,16 +119,10 @@ struct userstack_entry {
  */
 struct print_entry {
 	struct trace_entry	ent;
 	unsigned long		ip;
 	int			depth;
-	char			buf[];
-};
-
-struct bprintk_entry {
-	struct trace_entry	ent;
-	unsigned long		ip;
-	const char		*fmt;
-	u32			buf[];
+	const char		*fmt;
+	u32			buf[];
 };
 #ifdef CONFIG_TRACE_BPRINTK
 extern int trace_bprintk_enable;
@@ -296,7 +289,6 @@ extern void __ftrace_bad_type(void);
 		IF_ASSIGN(var, ent, struct stack_entry, TRACE_STACK);	\
 		IF_ASSIGN(var, ent, struct userstack_entry, TRACE_USER_STACK);\
 		IF_ASSIGN(var, ent, struct print_entry, TRACE_PRINT);	\
-		IF_ASSIGN(var, ent, struct bprintk_entry, TRACE_BPRINTK);\
 		IF_ASSIGN(var, ent, struct special_entry, 0);		\
 		IF_ASSIGN(var, ent, struct trace_mmiotrace_rw,		\
 			  TRACE_MMIO_RW);				\
diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c
index e527f2f66c73..453ebd3b636e 100644
--- a/kernel/trace/trace_functions_graph.c
+++ b/kernel/trace/trace_functions_graph.c
@@ -742,7 +742,11 @@ print_graph_comment(struct print_entry *trace, struct trace_seq *s,
 	}
 
 	/* The comment */
-	ret = trace_seq_printf(s, "/* %s", trace->buf);
+	ret = trace_seq_printf(s, "/* ");
+	if (!ret)
+		return TRACE_TYPE_PARTIAL_LINE;
+
+	ret = trace_seq_bprintf(s, trace->fmt, trace->buf);
 	if (!ret)
 		return TRACE_TYPE_PARTIAL_LINE;
 
diff --git a/kernel/trace/trace_mmiotrace.c b/kernel/trace/trace_mmiotrace.c
index c401b908e805..23e346a734ca 100644
--- a/kernel/trace/trace_mmiotrace.c
+++ b/kernel/trace/trace_mmiotrace.c
@@ -254,15 +254,18 @@ static enum print_line_t mmio_print_mark(struct trace_iterator *iter)
 {
 	struct trace_entry *entry = iter->ent;
 	struct print_entry *print = (struct print_entry *)entry;
-	const char *msg = print->buf;
 	struct trace_seq *s = &iter->seq;
 	unsigned long long t = ns2usecs(iter->ts);
-	unsigned long usec_rem = do_div(t, 1000000ULL);
+	unsigned long usec_rem = do_div(t, USEC_PER_SEC);
 	unsigned secs = (unsigned long)t;
 	int ret;
 
 	/* The trailing newline must be in the message. */
-	ret = trace_seq_printf(s, "MARK %u.%06lu %s", secs, usec_rem, msg);
+	ret = trace_seq_printf(s, "MARK %u.%06lu ", secs, usec_rem);
+	if (!ret)
+		return TRACE_TYPE_PARTIAL_LINE;
+
+	ret = trace_seq_bprintf(s, print->fmt, print->buf);
 	if (!ret)
 		return TRACE_TYPE_PARTIAL_LINE;
 
diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c
index 4ab71201862e..ef8fd661b217 100644
--- a/kernel/trace/trace_output.c
+++ b/kernel/trace/trace_output.c
@@ -53,8 +53,7 @@ trace_seq_printf(struct trace_seq *s, const char *fmt, ...)
 	return len;
 }
 
-static int
-trace_seq_bprintf(struct trace_seq *s, const char *fmt, const u32 *binary)
+int trace_seq_bprintf(struct trace_seq *s, const char *fmt, const u32 *binary)
 {
 	int len = (PAGE_SIZE - 1) - s->len;
 	int ret;
@@ -834,54 +833,12 @@ static struct trace_event trace_user_stack_event = {
 };
 
 /* TRACE_PRINT */
-static enum print_line_t trace_print_print(struct trace_iterator *iter,
-					   int flags)
-{
-	struct print_entry *field;
-	struct trace_seq *s = &iter->seq;
-
-	trace_assign_type(field, iter->ent);
-
-	if (!seq_print_ip_sym(s, field->ip, flags))
-		goto partial;
-
-	if (!trace_seq_printf(s, ": %s", field->buf))
-		goto partial;
-
-	return TRACE_TYPE_HANDLED;
-
- partial:
-	return TRACE_TYPE_PARTIAL_LINE;
-}
-
-static enum print_line_t trace_print_raw(struct trace_iterator *iter, int flags)
-{
-	struct print_entry *field;
-
-	trace_assign_type(field, iter->ent);
-
-	if (!trace_seq_printf(&iter->seq, "# %lx %s", field->ip, field->buf))
-		goto partial;
-
-	return TRACE_TYPE_HANDLED;
-
- partial:
-	return TRACE_TYPE_PARTIAL_LINE;
-}
-
-static struct trace_event trace_print_event = {
-	.type = TRACE_PRINT,
-	.trace = trace_print_print,
-	.raw = trace_print_raw,
-};
-
-/* TRACE_BPRINTK */
 static enum print_line_t
-trace_bprintk_print(struct trace_iterator *iter, int flags)
+trace_print_print(struct trace_iterator *iter, int flags)
 {
 	struct trace_entry *entry = iter->ent;
 	struct trace_seq *s = &iter->seq;
-	struct bprintk_entry *field;
+	struct print_entry *field;
 
 	trace_assign_type(field, entry);
 
@@ -900,14 +857,13 @@ trace_bprintk_print(struct trace_iterator *iter, int flags)
 	return TRACE_TYPE_PARTIAL_LINE;
 }
 
-static enum print_line_t
-trace_bprintk_raw(struct trace_iterator *iter, int flags)
+
+static enum print_line_t trace_print_raw(struct trace_iterator *iter, int flags)
 {
-	struct trace_entry *entry = iter->ent;
+	struct print_entry *field;
 	struct trace_seq *s = &iter->seq;
-	struct bprintk_entry *field;
 
-	trace_assign_type(field, entry);
+	trace_assign_type(field, iter->ent);
 
 	if (!trace_seq_printf(s, ": %lx : ", field->ip))
 		goto partial;
@@ -921,12 +877,11 @@ trace_bprintk_raw(struct trace_iterator *iter, int flags)
 	return TRACE_TYPE_PARTIAL_LINE;
 }
 
-static struct trace_event trace_bprintk_event = {
-	.type = TRACE_BPRINTK,
-	.trace = trace_bprintk_print,
-	.raw = trace_bprintk_raw,
-	.hex = trace_nop_print,
-	.binary = trace_nop_print,
+
+static struct trace_event trace_print_event = {
+	.type = TRACE_PRINT,
+	.trace = trace_print_print,
+	.raw = trace_print_raw,
 };
 
 static struct trace_event *events[] __initdata = {
@@ -937,7 +892,6 @@ static struct trace_event *events[] __initdata = {
 	&trace_stack_event,
 	&trace_user_stack_event,
 	&trace_print_event,
-	&trace_bprintk_event,
 	NULL
 };
 
diff --git a/kernel/trace/trace_output.h b/kernel/trace/trace_output.h
index 8a34d688ed63..3b90e6ade1aa 100644
--- a/kernel/trace/trace_output.h
+++ b/kernel/trace/trace_output.h
@@ -18,6 +18,8 @@ struct trace_event {
 extern int trace_seq_printf(struct trace_seq *s, const char *fmt, ...)
 	__attribute__ ((format (printf, 2, 3)));
 extern int
+trace_seq_bprintf(struct trace_seq *s, const char *fmt, const u32 *binary);
+extern int
 seq_print_ip_sym(struct trace_seq *s, unsigned long ip,
 		 unsigned long sym_flags);
 extern ssize_t trace_seq_to_user(struct trace_seq *s, char __user *ubuf,
diff --git a/kernel/trace/trace_bprintk.c b/kernel/trace/trace_printk.c
index f4c245a5cd33..a50aea22e929 100644
--- a/kernel/trace/trace_bprintk.c
+++ b/kernel/trace/trace_printk.c
@@ -21,31 +21,20 @@
 
 #ifdef CONFIG_MODULES
 
-/* binary printk basic */
-static DEFINE_MUTEX(btrace_mutex);
 /*
- * modules trace_bprintk()'s formats are autosaved in struct trace_bprintk_fmt
+ * modules trace_printk()'s formats are autosaved in struct trace_bprintk_fmt
  * which are queued on trace_bprintk_fmt_list.
  */
 static LIST_HEAD(trace_bprintk_fmt_list);
 
+/* serialize accesses to trace_bprintk_fmt_list */
+static DEFINE_MUTEX(btrace_mutex);
+
 struct trace_bprintk_fmt {
 	struct list_head list;
 	char fmt[0];
 };
 
-
-static inline void lock_btrace(void)
-{
-	mutex_lock(&btrace_mutex);
-}
-
-static inline void unlock_btrace(void)
-{
-	mutex_unlock(&btrace_mutex);
-}
-
-
 static inline struct trace_bprintk_fmt *lookup_format(const char *fmt)
 {
 	struct trace_bprintk_fmt *pos;
@@ -60,7 +49,8 @@ static
 void hold_module_trace_bprintk_format(const char **start, const char **end)
 {
 	const char **iter;
-	lock_btrace();
+
+	mutex_lock(&btrace_mutex);
 	for (iter = start; iter < end; iter++) {
 		struct trace_bprintk_fmt *tb_fmt = lookup_format(*iter);
 		if (tb_fmt) {
@@ -77,7 +67,7 @@ void hold_module_trace_bprintk_format(const char **start, const char **end)
 	} else
 		*iter = NULL;
 	}
-	unlock_btrace();
+	mutex_unlock(&btrace_mutex);
 }
 
 static int module_trace_bprintk_format_notify(struct notifier_block *self,
@@ -109,46 +99,40 @@ struct notifier_block module_trace_bprintk_format_nb = {
 	.notifier_call = module_trace_bprintk_format_notify,
 };
 
-/* events tracer */
-int trace_bprintk_enable;
+int __trace_printk(unsigned long ip, const char *fmt, ...)
+{
+	int ret;
+	va_list ap;
 
-static void start_bprintk_trace(struct trace_array *tr)
-{
-	tracing_reset_online_cpus(tr);
-	trace_bprintk_enable = 1;
-}
+	if (unlikely(!fmt))
+		return 0;
 
-static void stop_bprintk_trace(struct trace_array *tr)
-{
-	trace_bprintk_enable = 0;
-	tracing_reset_online_cpus(tr);
+	if (!(trace_flags & TRACE_ITER_PRINTK))
+		return 0;
+
+	va_start(ap, fmt);
+	ret = trace_vprintk(ip, task_curr_ret_stack(current), fmt, ap);
+	va_end(ap);
+	return ret;
 }
+EXPORT_SYMBOL_GPL(__trace_printk);
 
-static int init_bprintk_trace(struct trace_array *tr)
+int __ftrace_vprintk(unsigned long ip, const char *fmt, va_list ap)
 {
-	start_bprintk_trace(tr);
-	return 0;
+	if (unlikely(!fmt))
+		return 0;
+
+	if (!(trace_flags & TRACE_ITER_PRINTK))
+		return 0;
+
+	return trace_vprintk(ip, task_curr_ret_stack(current), fmt, ap);
 }
+EXPORT_SYMBOL_GPL(__ftrace_vprintk);
 
-static struct tracer bprintk_trace __read_mostly =
-{
-	.name = "events",
-	.init = init_bprintk_trace,
-	.reset = stop_bprintk_trace,
-	.start = start_bprintk_trace,
-	.stop = stop_bprintk_trace,
-};
 
-static __init int init_bprintk(void)
+static __init int init_trace_printk(void)
 {
-	int ret = register_module_notifier(&module_trace_bprintk_format_nb);
-	if (ret)
-		return ret;
-
-	ret = register_tracer(&bprintk_trace);
-	if (ret)
-		unregister_module_notifier(&module_trace_bprintk_format_nb);
-	return ret;
+	return register_module_notifier(&module_trace_bprintk_format_nb);
 }
 
-device_initcall(init_bprintk);
+early_initcall(init_trace_printk);