author		Lai Jiangshan <laijs@cn.fujitsu.com>	2009-03-06 11:21:47 -0500
committer	Ingo Molnar <mingo@elte.hu>	2009-03-06 11:59:11 -0500
commit		1427cdf0592368bdec57276edaf714040ee8744f (patch)
tree		4b214ee49643db383328cf53a31959eb0627a167 /kernel/trace
parent		546e5354a6e4ec760ac03ef1148e9a4762abb5f5 (diff)
tracing: infrastructure for supporting binary record
Impact: save on memory for tracing

Current tracers typically use a struct (like struct ftrace_entry, struct
ctx_switch_entry, struct special_entry, etc.) to record a binary event.
Each of these structs can only record its own kind of event, so a new
kind of tracer needs a new struct and a lot of code to handle it.

We therefore need a generic binary record for events; this
infrastructure is for that purpose.

[fweisbec@gmail.com: rebase against latest -tip, make it safe while
sched tracing as reported by Steven Rostedt]

Signed-off-by: Lai Jiangshan <laijs@cn.fujitsu.com>
Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>
Acked-by: Steven Rostedt <rostedt@goodmis.org>
LKML-Reference: <1236356510-8381-3-git-send-email-fweisbec@gmail.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
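For context on the calling convention this patch introduces: trace_vbprintk() takes an instruction pointer, a format string and a va_list, packs the arguments with vbin_printf() into the ring buffer next to a pointer to the format, and the text is only rendered at read time with bstr_printf(). A minimal sketch of a varargs front end, assuming the trace_vbprintk() prototype from this patch is visible; the wrapper name is hypothetical and the real callers arrive in later patches:

	/* Hypothetical wrapper illustrating how trace_vbprintk() is meant
	 * to be called.  Only the format *pointer* is stored in the ring
	 * buffer, so @fmt must stay valid (e.g. a string literal) until
	 * the event has been read and decoded with bstr_printf().
	 */
	int my_trace_bprintk(unsigned long ip, const char *fmt, ...)
	{
		va_list ap;
		int len;

		va_start(ap, fmt);
		len = trace_vbprintk(ip, fmt, ap);
		va_end(ap);

		return len;
	}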
Diffstat (limited to 'kernel/trace')
-rw-r--r--	kernel/trace/Kconfig	6
-rw-r--r--	kernel/trace/Makefile	1
-rw-r--r--	kernel/trace/trace.c	56
-rw-r--r--	kernel/trace/trace.h	12
-rw-r--r--	kernel/trace/trace_bprintk.c	87
-rw-r--r--	kernel/trace/trace_output.c	75
6 files changed, 237 insertions(+), 0 deletions(-)
diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig
index 058d949a3214..ad8d3617d0a6 100644
--- a/kernel/trace/Kconfig
+++ b/kernel/trace/Kconfig
@@ -97,6 +97,12 @@ config FUNCTION_GRAPH_TRACER
 	  This is done by setting the current return address on the current
 	  task structure into a stack of calls.
 
+config TRACE_BPRINTK
+	bool "Binary printk for tracing"
+	default y
+	depends on TRACING
+	select BINARY_PRINTF
+
 config IRQSOFF_TRACER
 	bool "Interrupts-off Latency Tracer"
 	default n
diff --git a/kernel/trace/Makefile b/kernel/trace/Makefile
index f44736c7574a..46557ef4c379 100644
--- a/kernel/trace/Makefile
+++ b/kernel/trace/Makefile
@@ -22,6 +22,7 @@ obj-$(CONFIG_TRACING) += trace.o
 obj-$(CONFIG_TRACING) += trace_clock.o
 obj-$(CONFIG_TRACING) += trace_output.o
 obj-$(CONFIG_TRACING) += trace_stat.o
+obj-$(CONFIG_TRACE_BPRINTK) += trace_bprintk.o
 obj-$(CONFIG_CONTEXT_SWITCH_TRACER) += trace_sched_switch.o
 obj-$(CONFIG_SYSPROF_TRACER) += trace_sysprof.o
 obj-$(CONFIG_FUNCTION_TRACER) += trace_functions.o
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index e6144acf2b75..ff53509e19f8 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -3792,6 +3792,62 @@ int __ftrace_vprintk(unsigned long ip, const char *fmt, va_list ap)
 }
 EXPORT_SYMBOL_GPL(__ftrace_vprintk);
 
+/**
+ * trace_vbprintk - write binary msg to tracing buffer
+ *
+ * Caller must ensure @fmt is valid while the msg is in the tracing buffer.
+ */
+int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
+{
+	static DEFINE_SPINLOCK(trace_buf_lock);
+	static u32 trace_buf[TRACE_BUF_SIZE];
+
+	struct ring_buffer_event *event;
+	struct trace_array *tr = &global_trace;
+	struct trace_array_cpu *data;
+	struct bprintk_entry *entry;
+	unsigned long flags;
+	int resched;
+	int cpu, len = 0, size, pc;
+
+	if (tracing_disabled || !trace_bprintk_enable)
+		return 0;
+
+	pc = preempt_count();
+	resched = ftrace_preempt_disable();
+	cpu = raw_smp_processor_id();
+	data = tr->data[cpu];
+
+	if (unlikely(atomic_read(&data->disabled)))
+		goto out;
+
+	spin_lock_irqsave(&trace_buf_lock, flags);
+	len = vbin_printf(trace_buf, TRACE_BUF_SIZE, fmt, args);
+
+	if (len > TRACE_BUF_SIZE || len < 0)
+		goto out_unlock;
+
+	size = sizeof(*entry) + sizeof(u32) * len;
+	event = trace_buffer_lock_reserve(tr, TRACE_BPRINTK, size, flags, pc);
+	if (!event)
+		goto out_unlock;
+	entry = ring_buffer_event_data(event);
+	entry->ip = ip;
+	entry->fmt = fmt;
+
+	memcpy(entry->buf, trace_buf, sizeof(u32) * len);
+	ring_buffer_unlock_commit(tr->buffer, event);
+
+out_unlock:
+	spin_unlock_irqrestore(&trace_buf_lock, flags);
+
+out:
+	ftrace_preempt_enable(resched);
+
+	return len;
+}
+EXPORT_SYMBOL_GPL(trace_vbprintk);
+
 static int trace_panic_handler(struct notifier_block *this,
 			       unsigned long event, void *unused)
 {
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index 8beff03fda68..0f5077f8f957 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -20,6 +20,7 @@ enum trace_type {
 	TRACE_WAKE,
 	TRACE_STACK,
 	TRACE_PRINT,
+	TRACE_BPRINTK,
 	TRACE_SPECIAL,
 	TRACE_MMIO_RW,
 	TRACE_MMIO_MAP,
@@ -124,6 +125,16 @@ struct print_entry {
 	char			buf[];
 };
 
+struct bprintk_entry {
+	struct trace_entry	ent;
+	unsigned long		ip;
+	const char		*fmt;
+	u32			buf[];
+};
+#ifdef CONFIG_TRACE_BPRINTK
+extern int trace_bprintk_enable;
+#endif
+
 #define TRACE_OLD_SIZE		88
 
 struct trace_field_cont {
@@ -285,6 +296,7 @@ extern void __ftrace_bad_type(void);
 		IF_ASSIGN(var, ent, struct stack_entry, TRACE_STACK);	\
 		IF_ASSIGN(var, ent, struct userstack_entry, TRACE_USER_STACK);\
 		IF_ASSIGN(var, ent, struct print_entry, TRACE_PRINT);	\
+		IF_ASSIGN(var, ent, struct bprintk_entry, TRACE_BPRINTK);\
 		IF_ASSIGN(var, ent, struct special_entry, 0);		\
 		IF_ASSIGN(var, ent, struct trace_mmiotrace_rw,		\
 			  TRACE_MMIO_RW);				\
diff --git a/kernel/trace/trace_bprintk.c b/kernel/trace/trace_bprintk.c
new file mode 100644
index 000000000000..1f8e532c3fb9
--- /dev/null
+++ b/kernel/trace/trace_bprintk.c
@@ -0,0 +1,87 @@
+/*
+ * trace binary printk
+ *
+ * Copyright (C) 2008 Lai Jiangshan <laijs@cn.fujitsu.com>
+ *
+ */
+#include <linux/kernel.h>
+#include <linux/ftrace.h>
+#include <linux/string.h>
+#include <linux/ctype.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/seq_file.h>
+#include <linux/fs.h>
+#include <linux/marker.h>
+#include <linux/uaccess.h>
+
+#include "trace.h"
+
+/* binary printk basic */
+static DEFINE_MUTEX(btrace_mutex);
+static int btrace_metadata_count;
+
+static inline void lock_btrace(void)
+{
+	mutex_lock(&btrace_mutex);
+}
+
+static inline void unlock_btrace(void)
+{
+	mutex_unlock(&btrace_mutex);
+}
+
+static void get_btrace_metadata(void)
+{
+	lock_btrace();
+	btrace_metadata_count++;
+	unlock_btrace();
+}
+
+static void put_btrace_metadata(void)
+{
+	lock_btrace();
+	btrace_metadata_count--;
+	unlock_btrace();
+}
+
+/* events tracer */
+int trace_bprintk_enable;
+
+static void start_bprintk_trace(struct trace_array *tr)
+{
+	get_btrace_metadata();
+	tracing_reset_online_cpus(tr);
+	trace_bprintk_enable = 1;
+}
+
+static void stop_bprintk_trace(struct trace_array *tr)
+{
+	trace_bprintk_enable = 0;
+	tracing_reset_online_cpus(tr);
+	put_btrace_metadata();
+}
+
+static int init_bprintk_trace(struct trace_array *tr)
+{
+	start_bprintk_trace(tr);
+	return 0;
+}
+
+static struct tracer bprintk_trace __read_mostly =
+{
+	.name		= "events",
+	.init		= init_bprintk_trace,
+	.reset		= stop_bprintk_trace,
+	.start		= start_bprintk_trace,
+	.stop		= stop_bprintk_trace,
+};
+
+static __init int init_bprintk(void)
+{
+	return register_tracer(&bprintk_trace);
+}
+
+device_initcall(init_bprintk);
diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c
index 306fef84c503..4ab71201862e 100644
--- a/kernel/trace/trace_output.c
+++ b/kernel/trace/trace_output.c
@@ -53,6 +53,26 @@ trace_seq_printf(struct trace_seq *s, const char *fmt, ...)
 	return len;
 }
 
+static int
+trace_seq_bprintf(struct trace_seq *s, const char *fmt, const u32 *binary)
+{
+	int len = (PAGE_SIZE - 1) - s->len;
+	int ret;
+
+	if (!len)
+		return 0;
+
+	ret = bstr_printf(s->buffer + s->len, len, fmt, binary);
+
+	/* If we can't write it all, don't bother writing anything */
+	if (ret >= len)
+		return 0;
+
+	s->len += ret;
+
+	return len;
+}
+
 /**
  * trace_seq_puts - trace sequence printing of simple string
  * @s: trace sequence descriptor
@@ -855,6 +875,60 @@ static struct trace_event trace_print_event = {
 	.raw		= trace_print_raw,
 };
 
+/* TRACE_BPRINTK */
+static enum print_line_t
+trace_bprintk_print(struct trace_iterator *iter, int flags)
+{
+	struct trace_entry *entry = iter->ent;
+	struct trace_seq *s = &iter->seq;
+	struct bprintk_entry *field;
+
+	trace_assign_type(field, entry);
+
+	if (!seq_print_ip_sym(s, field->ip, flags))
+		goto partial;
+
+	if (!trace_seq_puts(s, ": "))
+		goto partial;
+
+	if (!trace_seq_bprintf(s, field->fmt, field->buf))
+		goto partial;
+
+	return TRACE_TYPE_HANDLED;
+
+ partial:
+	return TRACE_TYPE_PARTIAL_LINE;
+}
+
+static enum print_line_t
+trace_bprintk_raw(struct trace_iterator *iter, int flags)
+{
+	struct trace_entry *entry = iter->ent;
+	struct trace_seq *s = &iter->seq;
+	struct bprintk_entry *field;
+
+	trace_assign_type(field, entry);
+
+	if (!trace_seq_printf(s, ": %lx : ", field->ip))
+		goto partial;
+
+	if (!trace_seq_bprintf(s, field->fmt, field->buf))
+		goto partial;
+
+	return TRACE_TYPE_HANDLED;
+
+ partial:
+	return TRACE_TYPE_PARTIAL_LINE;
+}
+
+static struct trace_event trace_bprintk_event = {
+	.type		= TRACE_BPRINTK,
+	.trace		= trace_bprintk_print,
+	.raw		= trace_bprintk_raw,
+	.hex		= trace_nop_print,
+	.binary		= trace_nop_print,
+};
+
 static struct trace_event *events[] __initdata = {
 	&trace_fn_event,
 	&trace_ctx_event,
@@ -863,6 +937,7 @@ static struct trace_event *events[] __initdata = {
 	&trace_stack_event,
 	&trace_user_stack_event,
 	&trace_print_event,
+	&trace_bprintk_event,
 	NULL
 };
 
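On the read side, the stored u32 payload plus the saved format pointer are enough to reproduce the text: bstr_printf() re-expands the packed argument words against the format, which is exactly what the new trace_seq_bprintf() does into the trace_seq buffer. A hedged sketch of that decode step, assuming a bprintk_entry as defined in trace.h above; the helper name and output buffer are made up for illustration:

	/* Sketch of the decode path for a binary record: bstr_printf()
	 * consumes the packed argument words in entry->buf using the
	 * format string pointer saved at trace time in entry->fmt. */
	static int decode_bprintk_entry(char *out, size_t size,
					const struct bprintk_entry *entry)
	{
		return bstr_printf(out, size, entry->fmt, entry->buf);
	}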