author		Jason Baron <jbaron@redhat.com>		2009-02-11 13:57:25 -0500
committer	Steven Rostedt <srostedt@redhat.com>	2009-02-13 09:06:18 -0500
commit		b5f9fd0f8a05c9bafb91a9a85b9110938d8e585b (patch)
tree		14e84d393f5dd1235fc6e256564d80e965daf3a7 /kernel/trace
parent		45141d4667d208421ca787a3301542b6a5e0b112 (diff)
tracing: convert c/p state power tracer to use tracepoints
Convert the c/p state "power" tracer to use tracepoints. This avoids a
function call when the tracer is disabled.
Signed-off-by: Jason Baron <jbaron@redhat.com>
Acked-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Steven Rostedt <srostedt@redhat.com>
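
The tracepoints these probes attach to are declared outside kernel/trace, so
they do not appear in the diffstat below. As a rough sketch only, assuming
the declarations live in include/trace/power.h and using the TPPROTO()/TPARGS()
macro spellings of kernels from this period (both the header path and the macro
names are inferred, not shown by this commit), the declarations look roughly
like:

/* Sketch of include/trace/power.h (assumed layout, not from this diff). */
#include <linux/ktime.h>
#include <linux/tracepoint.h>

struct power_trace {
	ktime_t	stamp;
	ktime_t	end;
	int	type;
	int	state;
};

/*
 * Each DECLARE_TRACE() emits a static inline trace_power_*() stub that is
 * only an unlikely() branch until a probe is attached, plus the
 * register_trace_power_*()/unregister_trace_power_*() helpers used by the
 * tracer below.  A matching DEFINE_TRACE(power_*) in one .c file
 * instantiates each tracepoint.
 */
DECLARE_TRACE(power_start,
	TPPROTO(struct power_trace *it, unsigned int type, unsigned int state),
	TPARGS(it, type, state));

DECLARE_TRACE(power_end,
	TPPROTO(struct power_trace *it),
	TPARGS(it));

DECLARE_TRACE(power_mark,
	TPPROTO(struct power_trace *it, unsigned int type, unsigned int state),
	TPARGS(it, type, state));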
Diffstat (limited to 'kernel/trace')
-rw-r--r--	kernel/trace/trace_power.c	173
1 file changed, 102 insertions(+), 71 deletions(-)
diff --git a/kernel/trace/trace_power.c b/kernel/trace/trace_power.c
index b1d0d087d3a6..91ce672fb037 100644
--- a/kernel/trace/trace_power.c
+++ b/kernel/trace/trace_power.c
@@ -21,15 +21,116 @@
 static struct trace_array *power_trace;
 static int __read_mostly trace_power_enabled;
 
+static void probe_power_start(struct power_trace *it, unsigned int type,
+				unsigned int level)
+{
+	if (!trace_power_enabled)
+		return;
+
+	memset(it, 0, sizeof(struct power_trace));
+	it->state = level;
+	it->type = type;
+	it->stamp = ktime_get();
+}
+
+
+static void probe_power_end(struct power_trace *it)
+{
+	struct ring_buffer_event *event;
+	struct trace_power *entry;
+	struct trace_array_cpu *data;
+	struct trace_array *tr = power_trace;
+
+	if (!trace_power_enabled)
+		return;
+
+	preempt_disable();
+	it->end = ktime_get();
+	data = tr->data[smp_processor_id()];
+
+	event = trace_buffer_lock_reserve(tr, TRACE_POWER,
+					  sizeof(*entry), 0, 0);
+	if (!event)
+		goto out;
+	entry = ring_buffer_event_data(event);
+	entry->state_data = *it;
+	trace_buffer_unlock_commit(tr, event, 0, 0);
+ out:
+	preempt_enable();
+}
+
+static void probe_power_mark(struct power_trace *it, unsigned int type,
+				unsigned int level)
+{
+	struct ring_buffer_event *event;
+	struct trace_power *entry;
+	struct trace_array_cpu *data;
+	struct trace_array *tr = power_trace;
+
+	if (!trace_power_enabled)
+		return;
+
+	memset(it, 0, sizeof(struct power_trace));
+	it->state = level;
+	it->type = type;
+	it->stamp = ktime_get();
+	preempt_disable();
+	it->end = it->stamp;
+	data = tr->data[smp_processor_id()];
+
+	event = trace_buffer_lock_reserve(tr, TRACE_POWER,
+					  sizeof(*entry), 0, 0);
+	if (!event)
+		goto out;
+	entry = ring_buffer_event_data(event);
+	entry->state_data = *it;
+	trace_buffer_unlock_commit(tr, event, 0, 0);
+ out:
+	preempt_enable();
+}
+
+static int tracing_power_register(void)
+{
+	int ret;
+
+	ret = register_trace_power_start(probe_power_start);
+	if (ret) {
+		pr_info("power trace: Couldn't activate tracepoint"
+			" probe to trace_power_start\n");
+		return ret;
+	}
+	ret = register_trace_power_end(probe_power_end);
+	if (ret) {
+		pr_info("power trace: Couldn't activate tracepoint"
+			" probe to trace_power_end\n");
+		goto fail_start;
+	}
+	ret = register_trace_power_mark(probe_power_mark);
+	if (ret) {
+		pr_info("power trace: Couldn't activate tracepoint"
+			" probe to trace_power_mark\n");
+		goto fail_end;
+	}
+	return ret;
+fail_end:
+	unregister_trace_power_end(probe_power_end);
+fail_start:
+	unregister_trace_power_start(probe_power_start);
+	return ret;
+}
 
 static void start_power_trace(struct trace_array *tr)
 {
 	trace_power_enabled = 1;
+	tracing_power_register();
 }
 
 static void stop_power_trace(struct trace_array *tr)
 {
 	trace_power_enabled = 0;
+	unregister_trace_power_start(probe_power_start);
+	unregister_trace_power_end(probe_power_end);
+	unregister_trace_power_mark(probe_power_mark);
 }
 
 
@@ -39,6 +140,7 @@ static int power_trace_init(struct trace_array *tr)
 	power_trace = tr;
 
 	trace_power_enabled = 1;
+	tracing_power_register();
 
 	for_each_cpu(cpu, cpu_possible_mask)
 		tracing_reset(tr, cpu);
@@ -95,74 +197,3 @@ static int init_power_trace(void)
 	return register_tracer(&power_tracer);
 }
 device_initcall(init_power_trace);
-
-void trace_power_start(struct power_trace *it, unsigned int type,
-			unsigned int level)
-{
-	if (!trace_power_enabled)
-		return;
-
-	memset(it, 0, sizeof(struct power_trace));
-	it->state = level;
-	it->type = type;
-	it->stamp = ktime_get();
-}
-EXPORT_SYMBOL_GPL(trace_power_start);
-
-
-void trace_power_end(struct power_trace *it)
-{
-	struct ring_buffer_event *event;
-	struct trace_power *entry;
-	struct trace_array_cpu *data;
-	struct trace_array *tr = power_trace;
-
-	if (!trace_power_enabled)
-		return;
-
-	preempt_disable();
-	it->end = ktime_get();
-	data = tr->data[smp_processor_id()];
-
-	event = trace_buffer_lock_reserve(tr, TRACE_POWER,
-					  sizeof(*entry), 0, 0);
-	if (!event)
-		goto out;
-	entry = ring_buffer_event_data(event);
-	entry->state_data = *it;
-	trace_buffer_unlock_commit(tr, event, 0, 0);
- out:
-	preempt_enable();
-}
-EXPORT_SYMBOL_GPL(trace_power_end);
-
-void trace_power_mark(struct power_trace *it, unsigned int type,
-			unsigned int level)
-{
-	struct ring_buffer_event *event;
-	struct trace_power *entry;
-	struct trace_array_cpu *data;
-	struct trace_array *tr = power_trace;
-
-	if (!trace_power_enabled)
-		return;
-
-	memset(it, 0, sizeof(struct power_trace));
-	it->state = level;
-	it->type = type;
-	it->stamp = ktime_get();
-	preempt_disable();
-	it->end = it->stamp;
-	data = tr->data[smp_processor_id()];
-
-	event = trace_buffer_lock_reserve(tr, TRACE_POWER,
-					  sizeof(*entry), 0, 0);
-	if (!event)
-		goto out;
-	entry = ring_buffer_event_data(event);
-	entry->state_data = *it;
-	trace_buffer_unlock_commit(tr, event, 0, 0);
- out:
-	preempt_enable();
-}
-EXPORT_SYMBOL_GPL(trace_power_mark);
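
After this conversion the call sites look unchanged: they still call
trace_power_start()/trace_power_end(), but those are now the inline stubs
generated by the tracepoint declarations rather than the exported functions
deleted above, so a disabled tracer costs only a not-taken branch. A minimal,
hypothetical call site (the driver function name and the POWER_CSTATE constant
are illustrative, not taken from this patch):

/* Hypothetical idle-driver call site; not part of this commit. */
#include <trace/power.h>	/* assumed header path, as sketched above */

static void example_enter_cstate(unsigned int state)
{
	struct power_trace it;

	/*
	 * With no probe attached these calls reduce to a cheap branch.
	 * Once start_power_trace() has registered the probes,
	 * probe_power_start() timestamps 'it' and probe_power_end()
	 * writes the completed interval into the ftrace ring buffer
	 * as a TRACE_POWER entry.
	 */
	trace_power_start(&it, POWER_CSTATE, state);
	/* ... hardware enters the C-state here ... */
	trace_power_end(&it);
}

Note the unwinding in tracing_power_register() (the fail_end/fail_start
labels): the three probes are registered all-or-nothing, which is why
stop_power_trace() can unconditionally unregister all three.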