aboutsummaryrefslogtreecommitdiffstats
path: root/kernel/trace/trace.c
diff options
context:
space:
mode:
authorLai Jiangshan <laijs@cn.fujitsu.com>2009-03-06 11:21:47 -0500
committerIngo Molnar <mingo@elte.hu>2009-03-06 11:59:11 -0500
commit1427cdf0592368bdec57276edaf714040ee8744f (patch)
tree4b214ee49643db383328cf53a31959eb0627a167 /kernel/trace/trace.c
parent546e5354a6e4ec760ac03ef1148e9a4762abb5f5 (diff)
tracing: infrastructure for supporting binary record
Impact: save on memory for tracing Current tracers are typically using a struct (like struct ftrace_entry, struct ctx_switch_entry, struct special_entry, etc.) to record a binary event. These structs can only record their own kind of event. A new kind of tracer needs a new struct and a lot of code to handle it. So we need a generic binary record for events. This infrastructure is for this purpose. [fweisbec@gmail.com: rebase against latest -tip, make it safe while sched tracing as reported by Steven Rostedt] Signed-off-by: Lai Jiangshan <laijs@cn.fujitsu.com> Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com> Acked-by: Steven Rostedt <rostedt@goodmis.org> LKML-Reference: <1236356510-8381-3-git-send-email-fweisbec@gmail.com> Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel/trace/trace.c')
-rw-r--r--kernel/trace/trace.c56
1 files changed, 56 insertions, 0 deletions
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index e6144acf2b75..ff53509e19f8 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -3792,6 +3792,62 @@ int __ftrace_vprintk(unsigned long ip, const char *fmt, va_list ap)
3792} 3792}
3793EXPORT_SYMBOL_GPL(__ftrace_vprintk); 3793EXPORT_SYMBOL_GPL(__ftrace_vprintk);
3794 3794
/**
 * trace_vbprintk - write binary msg to tracing buffer
 * @ip:   instruction pointer to record with the event
 * @fmt:  printf-style format string; only the pointer is stored in the
 *        ring buffer (see note below)
 * @args: va_list of arguments, binary-encoded via vbin_printf()
 *
 * Caller must ensure @fmt stays valid while the msg is in the tracing
 * buffer: entry->fmt records only the pointer, so the string must not
 * be freed before the event is consumed.
 *
 * Returns the number of u32 words encoded into the entry, or 0 when
 * tracing / binary printk is disabled.
 */
int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
{
	/*
	 * One shared scratch buffer for all CPUs; access is serialized
	 * by trace_buf_lock (taken with IRQs off below).
	 */
	static DEFINE_SPINLOCK(trace_buf_lock);
	static u32 trace_buf[TRACE_BUF_SIZE];

	struct ring_buffer_event *event;
	struct trace_array *tr = &global_trace;
	struct trace_array_cpu *data;
	struct bprintk_entry *entry;
	unsigned long flags;
	int resched;
	int cpu, len = 0, size, pc;

	if (tracing_disabled || !trace_bprintk_enable)
		return 0;

	/* Snapshot preempt count before we disable preemption ourselves. */
	pc = preempt_count();
	resched = ftrace_preempt_disable();
	cpu = raw_smp_processor_id();
	data = tr->data[cpu];

	/* Bail out if tracing is disabled on this CPU. */
	if (unlikely(atomic_read(&data->disabled)))
		goto out;

	/* IRQs must be off: an interrupt here could deadlock on the lock. */
	spin_lock_irqsave(&trace_buf_lock, flags);
	len = vbin_printf(trace_buf, TRACE_BUF_SIZE, fmt, args);

	/* len is a count of u32 words used, or negative on error. */
	if (len > TRACE_BUF_SIZE || len < 0)
		goto out_unlock;

	/* Entry header plus the binary-encoded argument words. */
	size = sizeof(*entry) + sizeof(u32) * len;
	event = trace_buffer_lock_reserve(tr, TRACE_BPRINTK, size, flags, pc);
	if (!event)
		goto out_unlock;
	entry = ring_buffer_event_data(event);
	entry->ip = ip;
	entry->fmt = fmt;	/* pointer only -- see kernel-doc note above */

	memcpy(entry->buf, trace_buf, sizeof(u32) * len);
	ring_buffer_unlock_commit(tr->buffer, event);

out_unlock:
	spin_unlock_irqrestore(&trace_buf_lock, flags);

out:
	ftrace_preempt_enable(resched);

	return len;
}
EXPORT_SYMBOL_GPL(trace_vbprintk);
3850
3795static int trace_panic_handler(struct notifier_block *this, 3851static int trace_panic_handler(struct notifier_block *this,
3796 unsigned long event, void *unused) 3852 unsigned long event, void *unused)
3797{ 3853{