author		Frederic Weisbecker <fweisbec@gmail.com>	2009-03-06 11:21:49 -0500
committer	Ingo Molnar <mingo@elte.hu>			2009-03-06 11:59:12 -0500
commit		769b0441f438c4bb4872cb8560eb6fe51bcc09ee (patch)
tree		9908682dfd89e97c3097a7c3adcae35d821e1895 /kernel/trace/trace.c
parent		1ba28e02a18cbdbea123836f6c98efb09cbf59ec (diff)
tracing/core: drop the old trace_printk() implementation in favour of trace_bprintk()
Impact: faster and lighter tracing
Now that we have trace_bprintk(), which is faster, consumes less memory than
trace_printk() and serves the same purpose, we can drop the old implementation
in favour of the binary one from trace_bprintk(). In practice, the whole
implementation of trace_bprintk() moves into trace_printk(), so the API doesn't
change, except that trace_seq_bprintk() must now be used to print the
TRACE_PRINT entries.
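To make that concrete, here is a small, hypothetical caller-side sketch (the
function and format string are illustrative, not part of this patch): callers
keep using the trace_printk() macro exactly as before; only the on-buffer
representation and the read-time formatting via trace_seq_bprintf() change.

/* Hypothetical caller: usage is unchanged by this patch. */
#include <linux/kernel.h>	/* trace_printk() lives here as of V3 */

static void example_event(int cpu, unsigned long delay_us)
{
	/*
	 * The format string is recorded by pointer and the arguments in
	 * binary form; the text is only rendered when the trace is read.
	 */
	trace_printk("cpu %d: delayed %lu usecs\n", cpu, delay_us);
}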
Some changes result from this:
- Previously, trace_bprintk depended on a single tracer and couldn't work
without it. This tracer has been dropped, and the whole implementation of
trace_printk() (including the module format management) is now integrated
into the tracing core (it comes with CONFIG_TRACING), though we keep the file
trace_printk.c (previously trace_bprintk.c), which holds the module
management. This way we don't overflow trace.c.
- Change some parts to use trace_seq_bprintf() to print TRACE_PRINT entries.
- Slightly change the trace_printk/trace_vprintk macros to support non-builtin
format constants and fix 'const' qualifier warnings. All of this is transparent
to developers.
- etc...
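As an aside, here is a stand-alone userspace sketch of the deferred-formatting
idea behind the binary path (illustrative only: it handles just "%u" and uses
printf() where the kernel uses bstr_printf()):

/*
 * Userspace sketch of trace_bprintk-style deferred formatting: record the
 * format pointer and raw argument words at "trace" time, and expand them
 * into text only when the buffer is read back.
 */
#include <stdarg.h>
#include <stdint.h>
#include <stdio.h>

struct bin_entry {
	const char *fmt;	/* must stay valid until read time */
	uint32_t args[8];	/* raw argument words, like the u32 trace_buf */
	int nargs;
};

/* "Trace" side: stash the arguments without formatting them. */
static void record(struct bin_entry *e, const char *fmt, ...)
{
	va_list ap;
	const char *p;

	e->fmt = fmt;
	e->nargs = 0;
	va_start(ap, fmt);
	for (p = fmt; *p; p++)
		if (p[0] == '%' && p[1] == 'u' && e->nargs < 8)
			e->args[e->nargs++] = va_arg(ap, unsigned int);
	va_end(ap);
}

/* "Read" side: expand the stored words against the saved format. */
static void replay(const struct bin_entry *e)
{
	/* Handles up to two "%u" conversions, for brevity. */
	printf(e->fmt, e->args[0], e->args[1]);
}

int main(void)
{
	struct bin_entry e;

	record(&e, "cpu %u: delayed %u usecs\n", 3u, 120u);
	replay(&e);		/* prints: cpu 3: delayed 120 usecs */
	return 0;
}

The real code additionally has to keep @fmt valid for the lifetime of the
ring-buffer entry, which is why the module format management stays in
trace_printk.c.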
V2:
- Rebase against latest changes
- Fix misspellings in the changelog
V3:
- Rebase against latest changes (trace_printk() moved to kernel.h)
Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>
Acked-by: Steven Rostedt <rostedt@goodmis.org>
LKML-Reference: <1236356510-8381-5-git-send-email-fweisbec@gmail.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel/trace/trace.c')
-rw-r--r--	kernel/trace/trace.c | 212
1 file changed, 62 insertions, 150 deletions
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 46b3cd7a5752..cc94f8642485 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -1169,6 +1169,67 @@ void trace_graph_return(struct ftrace_graph_ret *trace)
 }
 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
 
+
+/**
+ * trace_vprintk - write binary msg to tracing buffer
+ *
+ */
+int trace_vprintk(unsigned long ip, int depth, const char *fmt, va_list args)
+{
+	static DEFINE_SPINLOCK(trace_buf_lock);
+	static u32 trace_buf[TRACE_BUF_SIZE];
+
+	struct ring_buffer_event *event;
+	struct trace_array *tr = &global_trace;
+	struct trace_array_cpu *data;
+	struct print_entry *entry;
+	unsigned long flags;
+	int resched;
+	int cpu, len = 0, size, pc;
+
+	if (unlikely(tracing_selftest_running || tracing_disabled))
+		return 0;
+
+	/* Don't pollute graph traces with trace_vprintk internals */
+	pause_graph_tracing();
+
+	pc = preempt_count();
+	resched = ftrace_preempt_disable();
+	cpu = raw_smp_processor_id();
+	data = tr->data[cpu];
+
+	if (unlikely(atomic_read(&data->disabled)))
+		goto out;
+
+	spin_lock_irqsave(&trace_buf_lock, flags);
+	len = vbin_printf(trace_buf, TRACE_BUF_SIZE, fmt, args);
+
+	if (len > TRACE_BUF_SIZE || len < 0)
+		goto out_unlock;
+
+	size = sizeof(*entry) + sizeof(u32) * len;
+	event = trace_buffer_lock_reserve(tr, TRACE_PRINT, size, flags, pc);
+	if (!event)
+		goto out_unlock;
+	entry = ring_buffer_event_data(event);
+	entry->ip = ip;
+	entry->depth = depth;
+	entry->fmt = fmt;
+
+	memcpy(entry->buf, trace_buf, sizeof(u32) * len);
+	ring_buffer_unlock_commit(tr->buffer, event);
+
+out_unlock:
+	spin_unlock_irqrestore(&trace_buf_lock, flags);
+
+out:
+	ftrace_preempt_enable(resched);
+	unpause_graph_tracing();
+
+	return len;
+}
+EXPORT_SYMBOL_GPL(trace_vprintk);
+
 enum trace_file_type {
 	TRACE_FILE_LAT_FMT = 1,
 	TRACE_FILE_ANNOTATE = 2,
@@ -1564,7 +1625,7 @@ static enum print_line_t print_printk_msg_only(struct trace_iterator *iter)
 
 	trace_assign_type(field, entry);
 
-	ret = trace_seq_printf(s, "%s", field->buf);
+	ret = trace_seq_bprintf(s, field->fmt, field->buf);
 	if (!ret)
 		return TRACE_TYPE_PARTIAL_LINE;
 
@@ -3714,155 +3775,6 @@ static __init int tracer_init_debugfs(void)
 	return 0;
 }
 
-int trace_vprintk(unsigned long ip, int depth, const char *fmt, va_list args)
-{
-	static raw_spinlock_t trace_buf_lock = __RAW_SPIN_LOCK_UNLOCKED;
-	static char trace_buf[TRACE_BUF_SIZE];
-
-	struct ring_buffer_event *event;
-	struct trace_array *tr = &global_trace;
-	struct trace_array_cpu *data;
-	int cpu, len = 0, size, pc;
-	struct print_entry *entry;
-	unsigned long irq_flags;
-
-	if (tracing_disabled || tracing_selftest_running)
-		return 0;
-
-	pc = preempt_count();
-	preempt_disable_notrace();
-	cpu = raw_smp_processor_id();
-	data = tr->data[cpu];
-
-	if (unlikely(atomic_read(&data->disabled)))
-		goto out;
-
-	pause_graph_tracing();
-	raw_local_irq_save(irq_flags);
-	__raw_spin_lock(&trace_buf_lock);
-	len = vsnprintf(trace_buf, TRACE_BUF_SIZE, fmt, args);
-
-	len = min(len, TRACE_BUF_SIZE-1);
-	trace_buf[len] = 0;
-
-	size = sizeof(*entry) + len + 1;
-	event = trace_buffer_lock_reserve(tr, TRACE_PRINT, size, irq_flags, pc);
-	if (!event)
-		goto out_unlock;
-	entry = ring_buffer_event_data(event);
-	entry->ip = ip;
-	entry->depth = depth;
-
-	memcpy(&entry->buf, trace_buf, len);
-	entry->buf[len] = 0;
-	ring_buffer_unlock_commit(tr->buffer, event);
-
-out_unlock:
-	__raw_spin_unlock(&trace_buf_lock);
-	raw_local_irq_restore(irq_flags);
-	unpause_graph_tracing();
-out:
-	preempt_enable_notrace();
-
-	return len;
-}
-EXPORT_SYMBOL_GPL(trace_vprintk);
-
-int __trace_printk(unsigned long ip, const char *fmt, ...)
-{
-	int ret;
-	va_list ap;
-
-	if (!(trace_flags & TRACE_ITER_PRINTK))
-		return 0;
-
-	va_start(ap, fmt);
-	ret = trace_vprintk(ip, task_curr_ret_stack(current), fmt, ap);
-	va_end(ap);
-	return ret;
-}
-EXPORT_SYMBOL_GPL(__trace_printk);
-
-int __ftrace_vprintk(unsigned long ip, const char *fmt, va_list ap)
-{
-	if (!(trace_flags & TRACE_ITER_PRINTK))
-		return 0;
-
-	return trace_vprintk(ip, task_curr_ret_stack(current), fmt, ap);
-}
-EXPORT_SYMBOL_GPL(__ftrace_vprintk);
-
-/**
- * trace_vbprintk - write binary msg to tracing buffer
- *
- * Caller must insure @fmt are valid when msg is in tracing buffer.
- */
-int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
-{
-	static DEFINE_SPINLOCK(trace_buf_lock);
-	static u32 trace_buf[TRACE_BUF_SIZE];
-
-	struct ring_buffer_event *event;
-	struct trace_array *tr = &global_trace;
-	struct trace_array_cpu *data;
-	struct bprintk_entry *entry;
-	unsigned long flags;
-	int resched;
-	int cpu, len = 0, size, pc;
-
-	if (tracing_disabled || !trace_bprintk_enable)
-		return 0;
-
-	pc = preempt_count();
-	resched = ftrace_preempt_disable();
-	cpu = raw_smp_processor_id();
-	data = tr->data[cpu];
-
-	if (unlikely(atomic_read(&data->disabled)))
-		goto out;
-
-	spin_lock_irqsave(&trace_buf_lock, flags);
-	len = vbin_printf(trace_buf, TRACE_BUF_SIZE, fmt, args);
-
-	if (len > TRACE_BUF_SIZE || len < 0)
-		goto out_unlock;
-
-	size = sizeof(*entry) + sizeof(u32) * len;
-	event = trace_buffer_lock_reserve(tr, TRACE_BPRINTK, size, flags, pc);
-	if (!event)
-		goto out_unlock;
-	entry = ring_buffer_event_data(event);
-	entry->ip = ip;
-	entry->fmt = fmt;
-
-	memcpy(entry->buf, trace_buf, sizeof(u32) * len);
-	ring_buffer_unlock_commit(tr->buffer, event);
-
-out_unlock:
-	spin_unlock_irqrestore(&trace_buf_lock, flags);
-
-out:
-	ftrace_preempt_enable(resched);
-
-	return len;
-}
-EXPORT_SYMBOL_GPL(trace_vbprintk);
-
-int __trace_bprintk(unsigned long ip, const char *fmt, ...)
-{
-	int ret;
-	va_list ap;
-
-	if (!fmt)
-		return 0;
-
-	va_start(ap, fmt);
-	ret = trace_vbprintk(ip, fmt, ap);
-	va_end(ap);
-	return ret;
-}
-EXPORT_SYMBOL_GPL(__trace_bprintk);
-
 static int trace_panic_handler(struct notifier_block *this,
 			       unsigned long event, void *unused)
 {