author      Lai Jiangshan <laijs@cn.fujitsu.com>      2010-06-03 06:26:24 -0400
committer   Steven Rostedt <rostedt@goodmis.org>      2010-07-20 22:05:34 -0400
commit      bc289ae98b75d93228d24f521ef02a076e506e94 (patch)
tree        50d151d0fbde1b106932c0f80a2639839d261ca3  /include/trace/ftrace.h
parent      985023dee6e212493831431ba2e3ce8918f001b2 (diff)
tracing: Reduce latency and remove percpu trace_seq
__print_flags() and __print_symbolic() use a percpu trace_seq:

1) Its memory is allocated at compile time, so it wastes memory even when
   tracing is not used.
2) It is percpu data, so it wastes even more memory on multi-CPU systems.
3) It disables preemption while executing its core routine
   "trace_seq_printf(s, "%s: ", #call);", which introduces latency.

So move this trace_seq into struct trace_iterator.
Signed-off-by: Lai Jiangshan <laijs@cn.fujitsu.com>
LKML-Reference: <4C078350.7090106@cn.fujitsu.com>
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
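
[Editorial note] For context, here is a hand-written sketch of the pattern the generated output functions follow before and after this change. The event name, flag table, and function names (print_my_event_old, print_my_event_new, my_flag_names) are illustrative only and do not appear in the patch; the real code is emitted by the ftrace_raw_output_##call macros in the diff below.

#include <linux/percpu.h>
#include <linux/trace_seq.h>
#include <linux/ftrace_event.h>

/* Illustrative flag table for __print_flags()-style decoding. */
static const struct trace_print_flags my_flag_names[] = {
        { 0x01, "WAIT" },
        { 0x02, "IO" },
        { -1, NULL }
};

/* Before: the scratch trace_seq is a per-CPU variable, so the CPU must
 * stay pinned (preemption disabled) for as long as the buffer is in use. */
static DEFINE_PER_CPU(struct trace_seq, ftrace_event_seq);

static enum print_line_t print_my_event_old(struct trace_iterator *iter,
                                            unsigned long flags_field)
{
        struct trace_seq *s = &iter->seq;
        struct trace_seq *p;
        int ret;

        p = &get_cpu_var(ftrace_event_seq);     /* disables preemption */
        trace_seq_init(p);
        ret = trace_seq_printf(s, "my_event: flags=%s",
                               ftrace_print_flags_seq(p, "|", flags_field,
                                                      my_flag_names));
        put_cpu();                              /* re-enables preemption */
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;
        return TRACE_TYPE_HANDLED;
}

/* After: the scratch buffer lives in the iterator itself, so no per-CPU
 * storage is needed and preemption stays enabled throughout. */
static enum print_line_t print_my_event_new(struct trace_iterator *iter,
                                            unsigned long flags_field)
{
        struct trace_seq *s = &iter->seq;
        struct trace_seq *p = &iter->tmp_seq;   /* field added by this commit */
        int ret;

        trace_seq_init(p);
        ret = trace_seq_printf(s, "my_event: flags=%s",
                               ftrace_print_flags_seq(p, "|", flags_field,
                                                      my_flag_names));
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;
        return TRACE_TYPE_HANDLED;
}

The old variant has to pin the CPU only because the scratch buffer is per-CPU data; the new variant can remain preemptible because each trace_iterator carries its own tmp_seq rather than sharing a per-CPU buffer.
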
Diffstat (limited to 'include/trace/ftrace.h')
-rw-r--r--    include/trace/ftrace.h    12
1 file changed, 3 insertions(+), 9 deletions(-)
diff --git a/include/trace/ftrace.h b/include/trace/ftrace.h
index 55c1fd1bbc3d..fb783d94fc54 100644
--- a/include/trace/ftrace.h
+++ b/include/trace/ftrace.h
@@ -145,7 +145,7 @@
  *      struct trace_seq *s = &iter->seq;
  *      struct ftrace_raw_<call> *field; <-- defined in stage 1
  *      struct trace_entry *entry;
- *      struct trace_seq *p;
+ *      struct trace_seq *p = &iter->tmp_seq;
  *      int ret;
  *
  *      entry = iter->ent;
@@ -157,12 +157,10 @@
  *
  *      field = (typeof(field))entry;
  *
- *      p = &get_cpu_var(ftrace_event_seq);
  *      trace_seq_init(p);
  *      ret = trace_seq_printf(s, "%s: ", <call>);
  *      if (ret)
  *              ret = trace_seq_printf(s, <TP_printk> "\n");
- *      put_cpu();
  *      if (!ret)
  *              return TRACE_TYPE_PARTIAL_LINE;
  *
@@ -216,7 +214,7 @@ ftrace_raw_output_##call(struct trace_iterator *iter, int flags, \
         struct trace_seq *s = &iter->seq; \
         struct ftrace_raw_##call *field; \
         struct trace_entry *entry; \
-        struct trace_seq *p; \
+        struct trace_seq *p = &iter->tmp_seq; \
         int ret; \
         \
         event = container_of(trace_event, struct ftrace_event_call, \
@@ -231,12 +229,10 @@ ftrace_raw_output_##call(struct trace_iterator *iter, int flags, \
         \
         field = (typeof(field))entry; \
         \
-        p = &get_cpu_var(ftrace_event_seq); \
         trace_seq_init(p); \
         ret = trace_seq_printf(s, "%s: ", event->name); \
         if (ret) \
                 ret = trace_seq_printf(s, print); \
-        put_cpu(); \
         if (!ret) \
                 return TRACE_TYPE_PARTIAL_LINE; \
         \
@@ -255,7 +251,7 @@ ftrace_raw_output_##call(struct trace_iterator *iter, int flags, \
         struct trace_seq *s = &iter->seq; \
         struct ftrace_raw_##template *field; \
         struct trace_entry *entry; \
-        struct trace_seq *p; \
+        struct trace_seq *p = &iter->tmp_seq; \
         int ret; \
         \
         entry = iter->ent; \
@@ -267,12 +263,10 @@ ftrace_raw_output_##call(struct trace_iterator *iter, int flags, \
         \
         field = (typeof(field))entry; \
         \
-        p = &get_cpu_var(ftrace_event_seq); \
         trace_seq_init(p); \
         ret = trace_seq_printf(s, "%s: ", #call); \
         if (ret) \
                 ret = trace_seq_printf(s, print); \
-        put_cpu(); \
         if (!ret) \
                 return TRACE_TYPE_PARTIAL_LINE; \
         \
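
[Editorial note] The diffstat above is limited to include/trace/ftrace.h, so the definition of the new scratch field is not shown here; the initializer "p = &iter->tmp_seq" implies that the same commit also adds a tmp_seq member to struct trace_iterator (declared in include/linux/ftrace_event.h). An assumed excerpt, with member order and surrounding fields abbreviated:

struct trace_iterator {
        /* ... existing members (tr, trace, buffer_iter[], iter_flags, ...) ... */

        /* scratch trace_seq for __print_flags() and __print_symbolic() */
        struct trace_seq        tmp_seq;

        struct trace_seq        seq;    /* the output buffer (iter->seq above) */
        struct trace_entry      *ent;
        /* ... */
};
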